function_name (string) | docstring (string) | masked_code (string) | implementation (string) | start_line (int32) | end_line (int32) | file_content (string)
|---|---|---|---|---|---|---|
__getattribute__
|
When getting a filter attribute, looks for the corresponding filter
state and returns that instead of the filter object. If no state is found,
looks for the default value on the filter object. If that's not found
either, returns None.
|
from django import forms
from django.http import QueryDict
from django.forms.formsets import formset_factory
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from datetime import date
import itertools
import re
from fields import SubmitButtonField, SubmitButtonWidget
class Filter(object):
__metaclass__ = ABCMeta
_order = itertools.count()
form_field_class = None
form_field_widget = None
filter_state_names = ['%s', ]
filter_field = ''
def __init__(self,
default=None,
required=False,
label=None,
form_field_class=None,
form_field_widget=None,
filter_set=False,
filter_field=None):
self.default = default
self.required = required
self.label = label
self.form_field_class = form_field_class or self.form_field_class
self.form_field_widget = form_field_widget or self.form_field_widget
self.order = Filter._order.next()
self.filter_set = filter_set
self.filter_field = filter_field or self.filter_field
def get_form_field(self):
"""
Returns an instance of the form field class, used for constructing the
filter form for a report.
"""
return self.form_field_class(required=(self.required and not self.filter_set),
widget=self.form_field_widget,
label=self.label)
def get_form_class(self, name, index=0, postfix="Form"):
form_class_name = "%s%s" % (type(self).__name__, postfix)
form_class_dict = {name: self.get_form_field()}
return type(form_class_name, (forms.Form,), form_class_dict)
def clean_data(self, name, raw_data):
form = self.get_form_class(name)(data=raw_data)
return form.cleaned_data[name] if form.is_valid() else None
def get_data(self, name, data):
"""
To get the data for this filter given the filter sets, we instantiate
the form with the data, validate it, and return the cleaned data.
"""
cleaned_data = self.clean_data(name, data)
return cleaned_data if cleaned_data else self.default
def get_data_set(self, name, data):
"""
This horribly ugly little function is in charge of returning a list of
data entries, given filter states, for a filter set. It does the same
thing as get_data, but for every item in a filter set, returning the
results in a list.
"""
# If we're not really a set, just return a 1-element list with the data
if not self.filter_set:
return [self.get_data(name, data)]
# Get the deletion field name and index
delete = data.get('delete', None)
delete_index = None
if delete:
n, i = delete.split('.')
if n == name:
delete_index = int(i) + 1
# Zip together all the lists of filter state values. This gives us a
# list of tuples of filter state fields. Ugly but necessary in case we
# have a filter which generates a MultiValueField (aka,
# NumericComparisonFilter). Exclude elements which have been deleted.
filter_state_names = self.filter_state_names[:]
filter_state_list = [data.getlist(state_name % name, []) for state_name in filter_state_names]
filter_states = zip(*filter_state_list)
# Loop over every filter state tuple, converting it to a mini filter-
# -state dict. Clean it, and store the cleaned data in a list
data_set = []
for i in range(len(filter_states)):
# If this index is getting deleted, don't add it
if i == delete_index:
continue
# Get the dict of states for this filter set element
state = filter_states[i]
filter_dict = {}
            # Use a separate index so the outer loop variable is not clobbered
            for j in range(len(filter_state_names)):
                filter_dict[filter_state_names[j] % name] = state[j]
# Clean and validate the set instance data. If it validates, store
# it in the state list.
cleaned_data = self.clean_data(name, filter_dict)
            if cleaned_data:
                data_set.append(cleaned_data)
# Return the list of states
return data_set
def get_filter_state_from_data(self, name, data):
"""
        Another nasty little bit. This one (if not overridden) takes some
        data and encodes it, using the filter state names, into a valid
        filter_state which would return the original data if passed to get_data.
        TODO: Make sure this actually works for filters other than
        NumericComparisonFilter
        TODO: Add good comments :P
"""
if len(self.filter_state_names) > 1:
if not (hasattr(data, '__iter__') and len(self.filter_state_names) == len(data)):
                raise ValueError('data must be an iterable with one value per filter state name')
state = {}
for i in range(0, len(data)):
state.update({self.filter_state_names[i] % name: data[i]})
return state
else:
return {self.filter_state_names[0] % name: data}
def apply_filter(self, queryset, data):
filterspec = {self.filter_field: data}
return queryset.filter(**filterspec)
def apply_filter_set(self, queryset, data_set):
# Apply the filter to the queryset based on each entry in the data set
for data in data_set:
queryset = self.apply_filter(queryset, data)
return queryset
class Report(object):
__metaclass__ = ABCMeta
headers = None
footers = None
title = None
def __init__(self, filter_states={}):
"""
        filter_states will be a QueryDict with keys corresponding to the
        names of the filter members on this report object.
"""
if isinstance(filter_states, QueryDict):
self.filter_states = filter_states
else:
self.filter_states = QueryDict('', mutable=True)
self.filter_states.update(filter_states)
self.title = self.title or self.get_title_from_class_name()
# MASKED: __getattribute__ function (lines 170-191)
def get_title_from_class_name(self):
"""
Split the class name into words, delimited by capitals.
"""
words = re.split(r'([A-Z])', self.__class__.__name__)[1:]
words = [words[i] + words[i+1] for i in range(0, len(words) - 1, 2)]
return ' '.join(words)
def get_filter(self, name):
"""
Perform the normal __getattribute__ call,
and return it if it's a filter
"""
attr = object.__getattribute__(self, name)
return attr if issubclass(type(attr), Filter) else None
def get_filters(self):
"""
Return a list of all the names and attributes on this report instance
which have a base class of Filter.
"""
filters = []
for name in dir(self):
attr = object.__getattribute__(self, name)
if issubclass(type(attr), Filter):
filters.append((name, attr))
return sorted(filters, key=lambda attr: attr[1].order)
def get_filter_forms(self):
for name, attr in self.get_filters():
# If it is a filter set, loop through the existing list of data
# in the filter states, if there are any. For each of these, make a
# sub-form which includes a "delete" checkbox
if attr.filter_set:
# Get the new-set element form
form = attr.get_form_class(name)()
form.name = name
yield form
# Yield all the existing form elements
data_set = attr.get_data_set(name, self.filter_states)
for i in range(len(data_set)):
data = data_set[i]
state = attr.get_filter_state_from_data(name, data)
# Generate and yield a form containing the filter's field,
# as well as a deleting submit field to mark deletions
form = attr.get_form_class(
name=name,
postfix="FormSetElem"
)(data=state)
form.delete = {
'filter': name,
'index': i}
form.name = name
yield form
            # If it isn't a filter set, just get its form class and render it
            # with the filter state data
else:
form = attr.get_form_class(name)(data=self.filter_states)
form.name = name
yield form
def get_title(self):
return self.title
def get_headers(self):
return self.headers
def get_footers(self):
return self.footers
def apply_filter(self, queryset, name):
f = self.get_filter(name)
# If it's not a filterset, just get the regular data and apply it
if not f.filter_set:
data = f.get_data(name, self.filter_states)
if data:
return f.apply_filter(queryset, data)
# Otherwise, get the full data set and apply it
else:
data_set = f.get_data_set(name, self.filter_states)
if len(data_set) > 0:
return f.apply_filter_set(queryset, data_set)
# If we weren't able to apply the filter, return the raw queryset
return queryset
def apply_filters(self, queryset, names=None, excludes=[]):
for name, f in self.get_filters():
# Only apply this filter if it's selected
if name in excludes or (names and name not in names):
continue
# Apply this filter
queryset = self.apply_filter(queryset, name)
# Return the filtered queryset
return queryset
def get_queryset(self):
return []
def get_row(self, item):
"""
This can return a list for simple data that doesn't need special
template rendering, or a dict for more complex data where individual
fields will need to be rendered specially.
"""
return []
def get_rows(self):
rows = []
for item in self.get_queryset():
row = self.get_row(item)
if row:
rows.append(row)
return rows
def get_count(self):
return self.get_queryset().count()
def get_table(self):
return [[cell for cell in row] for row in self.get_rows()]
@staticmethod
def encode_filter_states(data):
"""
Converts a normal POST querydict to the filterstate data,
to be stored in the url
"""
#data = QueryDict(data.urlencode(), mutable=True)
return data
@staticmethod
def decode_filter_states(data):
"""
Opposite of encode_filter_states
"""
return data
class Row(object):
def __init__(self, list, attrs=None):
self.list = list
if attrs:
for name, value in attrs.iteritems():
setattr(self, name, value)
def __iter__(self):
return self.list.__iter__()
|
def __getattribute__(self, name):
"""
        When getting a filter attribute, looks for the corresponding filter
        state and returns that instead of the filter object. If no state is found,
        looks for the default value on the filter object. If that's not found
        either, returns None.
"""
# Perform the normal __getattribute__ call
attr = object.__getattribute__(self, name)
# If it's a filter attribute...
if issubclass(type(attr), Filter):
# If we have a filter state for this filter, convert it to the type
# of data for this filter.
if not attr.filter_set:
return attr.get_data(name, self.filter_states)
else:
return attr.get_data_set(name, self.filter_states)
# This isn't a filter, just return the attribute
return attr
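# Hedged usage sketch: OrderReport and its 'status' filter are hypothetical,
# illustrating how this interception behaves for a plain (non-set) filter.
class OrderReport(Report):
    status = Filter(default='open',
                    form_field_class=forms.CharField,
                    filter_field='status__exact')

report = OrderReport(filter_states={'status': 'closed'})
print report.status          # 'closed': the cleaned filter state, not the Filter object
print OrderReport().status   # 'open': no state given, so the filter default is returned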
| 170
| 191
|
from django import forms
from django.http import QueryDict
from django.forms.formsets import formset_factory
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from datetime import date
import itertools
import re
from fields import SubmitButtonField, SubmitButtonWidget
class Filter(object):
__metaclass__ = ABCMeta
_order = itertools.count()
form_field_class = None
form_field_widget = None
filter_state_names = ['%s', ]
filter_field = ''
def __init__(self,
default=None,
required=False,
label=None,
form_field_class=None,
form_field_widget=None,
filter_set=False,
filter_field=None):
self.default = default
self.required = required
self.label = label
self.form_field_class = form_field_class or self.form_field_class
self.form_field_widget = form_field_widget or self.form_field_widget
self.order = Filter._order.next()
self.filter_set = filter_set
self.filter_field = filter_field or self.filter_field
def get_form_field(self):
"""
Returns an instance of the form field class, used for constructing the
filter form for a report.
"""
return self.form_field_class(required=(self.required and not self.filter_set),
widget=self.form_field_widget,
label=self.label)
def get_form_class(self, name, index=0, postfix="Form"):
form_class_name = "%s%s" % (type(self).__name__, postfix)
form_class_dict = {name: self.get_form_field()}
return type(form_class_name, (forms.Form,), form_class_dict)
def clean_data(self, name, raw_data):
form = self.get_form_class(name)(data=raw_data)
return form.cleaned_data[name] if form.is_valid() else None
def get_data(self, name, data):
"""
To get the data for this filter given the filter sets, we instantiate
the form with the data, validate it, and return the cleaned data.
"""
cleaned_data = self.clean_data(name, data)
return cleaned_data if cleaned_data else self.default
def get_data_set(self, name, data):
"""
This horribly ugly little function is in charge of returning a list of
data entries, given filter states, for a filter set. It does the same
thing as get_data, but for every item in a filter set, returning the
results in a list.
"""
# If we're not really a set, just return a 1-element list with the data
if not self.filter_set:
return [self.get_data(name, data)]
# Get the deletion field name and index
delete = data.get('delete', None)
delete_index = None
if delete:
n, i = delete.split('.')
if n == name:
delete_index = int(i) + 1
# Zip together all the lists of filter state values. This gives us a
# list of tuples of filter state fields. Ugly but necessary in case we
# have a filter which generates a MultiValueField (aka,
# NumericComparisonFilter). Exclude elements which have been deleted.
filter_state_names = self.filter_state_names[:]
filter_state_list = [data.getlist(state_name % name, []) for state_name in filter_state_names]
filter_states = zip(*filter_state_list)
# Loop over every filter state tuple, converting it to a mini filter-
# -state dict. Clean it, and store the cleaned data in a list
data_set = []
for i in range(len(filter_states)):
# If this index is getting deleted, don't add it
if i == delete_index:
continue
# Get the dict of states for this filter set element
state = filter_states[i]
filter_dict = {}
            # Use a separate index so the outer loop variable is not clobbered
            for j in range(len(filter_state_names)):
                filter_dict[filter_state_names[j] % name] = state[j]
# Clean and validate the set instance data. If it validates, store
# it in the state list.
cleaned_data = self.clean_data(name, filter_dict)
            if cleaned_data:
                data_set.append(cleaned_data)
# Return the list of states
return data_set
def get_filter_state_from_data(self, name, data):
"""
        Another nasty little bit. This one (if not overridden) takes some
        data and encodes it, using the filter state names, into a valid
        filter_state which would return the original data if passed to get_data.
        TODO: Make sure this actually works for filters other than
        NumericComparisonFilter
        TODO: Add good comments :P
"""
if len(self.filter_state_names) > 1:
if not (hasattr(data, '__iter__') and len(self.filter_state_names) == len(data)):
                raise ValueError('data must be an iterable with one value per filter state name')
state = {}
for i in range(0, len(data)):
state.update({self.filter_state_names[i] % name: data[i]})
return state
else:
return {self.filter_state_names[0] % name: data}
def apply_filter(self, queryset, data):
filterspec = {self.filter_field: data}
return queryset.filter(**filterspec)
def apply_filter_set(self, queryset, data_set):
# Apply the filter to the queryset based on each entry in the data set
for data in data_set:
queryset = self.apply_filter(queryset, data)
return queryset
class Report(object):
__metaclass__ = ABCMeta
headers = None
footers = None
title = None
def __init__(self, filter_states={}):
"""
        filter_states will be a QueryDict with keys corresponding to the
        names of the filter members on this report object.
"""
if isinstance(filter_states, QueryDict):
self.filter_states = filter_states
else:
self.filter_states = QueryDict('', mutable=True)
self.filter_states.update(filter_states)
self.title = self.title or self.get_title_from_class_name()
def __getattribute__(self, name):
"""
        When getting a filter attribute, looks for the corresponding filter
        state and returns that instead of the filter object. If no state is found,
        looks for the default value on the filter object. If that's not found
        either, returns None.
"""
# Perform the normal __getattribute__ call
attr = object.__getattribute__(self, name)
# If it's a filter attribute...
if issubclass(type(attr), Filter):
# If we have a filter state for this filter, convert it to the type
# of data for this filter.
if not attr.filter_set:
return attr.get_data(name, self.filter_states)
else:
return attr.get_data_set(name, self.filter_states)
# This isn't a filter, just return the attribute
return attr
def get_title_from_class_name(self):
"""
Split the class name into words, delimited by capitals.
"""
words = re.split(r'([A-Z])', self.__class__.__name__)[1:]
words = [words[i] + words[i+1] for i in range(0, len(words) - 1, 2)]
return ' '.join(words)
def get_filter(self, name):
"""
Perform the normal __getattribute__ call,
and return it if it's a filter
"""
attr = object.__getattribute__(self, name)
return attr if issubclass(type(attr), Filter) else None
def get_filters(self):
"""
Return a list of all the names and attributes on this report instance
which have a base class of Filter.
"""
filters = []
for name in dir(self):
attr = object.__getattribute__(self, name)
if issubclass(type(attr), Filter):
filters.append((name, attr))
return sorted(filters, key=lambda attr: attr[1].order)
def get_filter_forms(self):
for name, attr in self.get_filters():
# If it is a filter set, loop through the existing list of data
# in the filter states, if there are any. For each of these, make a
# sub-form which includes a "delete" checkbox
if attr.filter_set:
# Get the new-set element form
form = attr.get_form_class(name)()
form.name = name
yield form
# Yield all the existing form elements
data_set = attr.get_data_set(name, self.filter_states)
for i in range(len(data_set)):
data = data_set[i]
state = attr.get_filter_state_from_data(name, data)
# Generate and yield a form containing the filter's field,
# as well as a deleting submit field to mark deletions
form = attr.get_form_class(
name=name,
postfix="FormSetElem"
)(data=state)
form.delete = {
'filter': name,
'index': i}
form.name = name
yield form
            # If it isn't a filter set, just get its form class and render it
            # with the filter state data
else:
form = attr.get_form_class(name)(data=self.filter_states)
form.name = name
yield form
def get_title(self):
return self.title
def get_headers(self):
return self.headers
def get_footers(self):
return self.footers
def apply_filter(self, queryset, name):
f = self.get_filter(name)
# If it's not a filterset, just get the regular data and apply it
if not f.filter_set:
data = f.get_data(name, self.filter_states)
if data:
return f.apply_filter(queryset, data)
# Otherwise, get the full data set and apply it
else:
data_set = f.get_data_set(name, self.filter_states)
if len(data_set) > 0:
return f.apply_filter_set(queryset, data_set)
# If we weren't able to apply the filter, return the raw queryset
return queryset
def apply_filters(self, queryset, names=None, excludes=[]):
for name, f in self.get_filters():
# Only apply this filter if it's selected
if name in excludes or (names and name not in names):
continue
# Apply this filter
queryset = self.apply_filter(queryset, name)
# Return the filtered queryset
return queryset
def get_queryset(self):
return []
def get_row(self, item):
"""
This can return a list for simple data that doesn't need special
template rendering, or a dict for more complex data where individual
fields will need to be rendered specially.
"""
return []
def get_rows(self):
rows = []
for item in self.get_queryset():
row = self.get_row(item)
if row:
rows.append(row)
return rows
def get_count(self):
return self.get_queryset().count()
def get_table(self):
return [[cell for cell in row] for row in self.get_rows()]
@staticmethod
def encode_filter_states(data):
"""
Converts a normal POST querydict to the filterstate data,
to be stored in the url
"""
#data = QueryDict(data.urlencode(), mutable=True)
return data
@staticmethod
def decode_filter_states(data):
"""
Opposite of encode_filter_states
"""
return data
class Row(object):
def __init__(self, list, attrs=None):
self.list = list
if attrs:
for name, value in attrs.iteritems():
setattr(self, name, value)
def __iter__(self):
return self.list.__iter__()
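# Queryset sketch reusing the hypothetical OrderReport above; Order stands in
# for any Django model with a 'status' field.
orders = Order.objects.all()
report = OrderReport(filter_states={'status': 'closed'})
filtered = report.apply_filters(orders)         # apply every filter on the report
closed = report.apply_filter(orders, 'status')  # or apply a single filter by name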
|
mailman_add
|
Add a Satchmo contact to a mailman mailing list.
Parameters:
- `contact`: a Satchmo Contact
- `listname`: the Mailman listname, defaulting to whatever you have set in settings.NEWSLETTER_NAME
- `send_welcome_msg`: True or False, defaulting to the list default
- `admin_notify`: True or False, defaulting to the list default
|
"""A Mailman newsletter subscription interface.
To use this plugin, enable the newsletter module and set the newsletter module and name settings
in the admin settings page.
"""
from django.utils.translation import ugettext as _
from Mailman import MailList, Errors
from models import Subscription
from satchmo.configuration import config_value
import logging
import sys
log = logging.getLogger('newsletter.mailman')
class UserDesc: pass
def is_subscribed(contact):
return Subscription.email_is_subscribed(contact.email)
def update_contact(contact, subscribe, attributes={}):
email = contact.email
current = Subscription.email_is_subscribed(email)
attributesChanged = False
sub = None
if attributes:
sub, created = Subscription.objects.get_or_create(email=email)
if created:
attributesChanged = True
else:
oldAttr = [(a.name,a.value) for a in sub.attributes.all()]
oldAttr.sort()
sub.update_attributes(attributes)
newAttr = [(a.name,a.value) for a in sub.attributes.all()]
newAttr.sort()
if not created:
attributesChanged = oldAttr != newAttr
if current == subscribe:
if subscribe:
if attributesChanged:
result = _("Updated subscription for %(email)s.")
else:
result = _("Already subscribed %(email)s.")
else:
result = _("Already removed %(email)s.")
else:
if not sub:
sub, created = Subscription.objects.get_or_create(email=email)
sub.subscribed = subscribe
sub.save()
if subscribe:
mailman_add(contact)
result = _("Subscribed: %(email)s")
else:
mailman_remove(contact)
result = _("Unsubscribed: %(email)s")
return result % { 'email' : email }
# MASKED: mailman_add function (lines 66-110)
def mailman_remove(contact, listname=None, userack=None, admin_notify=None):
"""Remove a Satchmo contact from a Mailman mailing list
Parameters:
- `contact`: A Satchmo contact
- `listname`: the Mailman listname, defaulting to whatever you have set in settings.NEWSLETTER_NAME
- `userack`: True or False, whether to notify the user, defaulting to the list default
- `admin_notify`: True or False, defaulting to the list default
"""
mm, listname = _get_maillist(listname)
print >> sys.stderr, 'mailman removing %s from %s' % (contact.email, listname)
if mm.isMember(contact.email):
try:
mm.Lock()
mm.ApprovedDeleteMember(contact.email, 'satchmo.newsletter', admin_notify, userack)
mm.Save()
finally:
mm.Unlock()
def _get_maillist(listname):
try:
if not listname:
listname = config_value('NEWSLETTER', 'NEWSLETTER_NAME')
if listname == "":
log.warn("NEWSLETTER_NAME not set in store settings")
raise NameError('No NEWSLETTER_NAME in settings')
return MailList.MailList(listname, lock=0), listname
except Errors.MMUnknownListError:
print >> sys.stderr, "Can't find the MailMan newsletter: %s" % listname
raise NameError('No such newsletter, "%s"' % listname)
|
def mailman_add(contact, listname=None, send_welcome_msg=None, admin_notify=None):
"""Add a Satchmo contact to a mailman mailing list.
Parameters:
    - `contact`: a Satchmo Contact
    - `listname`: the Mailman listname, defaulting to whatever you have set in settings.NEWSLETTER_NAME
    - `send_welcome_msg`: True or False, defaulting to the list default
    - `admin_notify`: True or False, defaulting to the list default
"""
mm, listname = _get_maillist(listname)
print >> sys.stderr, 'mailman adding %s to %s' % (contact.email, listname)
if send_welcome_msg is None:
send_welcome_msg = mm.send_welcome_msg
userdesc = UserDesc()
userdesc.fullname = contact.full_name
userdesc.address = contact.email
userdesc.digest = False
if mm.isMember(contact.email):
print >> sys.stderr, _('Already Subscribed: %s' % contact.email)
else:
try:
try:
mm.Lock()
mm.ApprovedAddMember(userdesc, send_welcome_msg, admin_notify)
mm.Save()
print >> sys.stderr, _('Subscribed: %(email)s') % { 'email' : contact.email }
except Errors.MMAlreadyAMember:
print >> sys.stderr, _('Already a member: %(email)s') % { 'email' : contact.email }
except Errors.MMBadEmailError:
if userdesc.address == '':
print >> sys.stderr, _('Bad/Invalid email address: blank line')
else:
print >> sys.stderr, _('Bad/Invalid email address: %(email)s') % { 'email' : contact.email }
except Errors.MMHostileAddress:
print >> sys.stderr, _('Hostile address (illegal characters): %(email)s') % { 'email' : contact.email }
finally:
mm.Unlock()
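# Illustrative call sequence; the list name and contact lookup are placeholders
# for a real Satchmo Contact and Mailman list.
contact = Contact.objects.get(email='user@example.com')
mailman_add(contact, listname='store-news', send_welcome_msg=True)
# update_contact is the higher-level entry point that also records the
# Subscription row before calling mailman_add/mailman_remove:
print update_contact(contact, subscribe=True)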
| 66
| 110
|
"""A Mailman newsletter subscription interface.
To use this plugin, enable the newsletter module and set the newsletter module and name settings
in the admin settings page.
"""
from django.utils.translation import ugettext as _
from Mailman import MailList, Errors
from models import Subscription
from satchmo.configuration import config_value
import logging
import sys
log = logging.getLogger('newsletter.mailman')
class UserDesc: pass
def is_subscribed(contact):
return Subscription.email_is_subscribed(contact.email)
def update_contact(contact, subscribe, attributes={}):
email = contact.email
current = Subscription.email_is_subscribed(email)
attributesChanged = False
sub = None
if attributes:
sub, created = Subscription.objects.get_or_create(email=email)
if created:
attributesChanged = True
else:
oldAttr = [(a.name,a.value) for a in sub.attributes.all()]
oldAttr.sort()
sub.update_attributes(attributes)
newAttr = [(a.name,a.value) for a in sub.attributes.all()]
newAttr.sort()
if not created:
attributesChanged = oldAttr != newAttr
if current == subscribe:
if subscribe:
if attributesChanged:
result = _("Updated subscription for %(email)s.")
else:
result = _("Already subscribed %(email)s.")
else:
result = _("Already removed %(email)s.")
else:
if not sub:
sub, created = Subscription.objects.get_or_create(email=email)
sub.subscribed = subscribe
sub.save()
if subscribe:
mailman_add(contact)
result = _("Subscribed: %(email)s")
else:
mailman_remove(contact)
result = _("Unsubscribed: %(email)s")
return result % { 'email' : email }
def mailman_add(contact, listname=None, send_welcome_msg=None, admin_notify=None):
"""Add a Satchmo contact to a mailman mailing list.
Parameters:
    - `contact`: a Satchmo Contact
    - `listname`: the Mailman listname, defaulting to whatever you have set in settings.NEWSLETTER_NAME
    - `send_welcome_msg`: True or False, defaulting to the list default
    - `admin_notify`: True or False, defaulting to the list default
"""
mm, listname = _get_maillist(listname)
print >> sys.stderr, 'mailman adding %s to %s' % (contact.email, listname)
if send_welcome_msg is None:
send_welcome_msg = mm.send_welcome_msg
userdesc = UserDesc()
userdesc.fullname = contact.full_name
userdesc.address = contact.email
userdesc.digest = False
if mm.isMember(contact.email):
print >> sys.stderr, _('Already Subscribed: %s' % contact.email)
else:
try:
try:
mm.Lock()
mm.ApprovedAddMember(userdesc, send_welcome_msg, admin_notify)
mm.Save()
print >> sys.stderr, _('Subscribed: %(email)s') % { 'email' : contact.email }
except Errors.MMAlreadyAMember:
print >> sys.stderr, _('Already a member: %(email)s') % { 'email' : contact.email }
except Errors.MMBadEmailError:
if userdesc.address == '':
print >> sys.stderr, _('Bad/Invalid email address: blank line')
else:
print >> sys.stderr, _('Bad/Invalid email address: %(email)s') % { 'email' : contact.email }
except Errors.MMHostileAddress:
print >> sys.stderr, _('Hostile address (illegal characters): %(email)s') % { 'email' : contact.email }
finally:
mm.Unlock()
def mailman_remove(contact, listname=None, userack=None, admin_notify=None):
"""Remove a Satchmo contact from a Mailman mailing list
Parameters:
- `contact`: A Satchmo contact
- `listname`: the Mailman listname, defaulting to whatever you have set in settings.NEWSLETTER_NAME
- `userack`: True or False, whether to notify the user, defaulting to the list default
- `admin_notify`: True or False, defaulting to the list default
"""
mm, listname = _get_maillist(listname)
print >> sys.stderr, 'mailman removing %s from %s' % (contact.email, listname)
if mm.isMember(contact.email):
try:
mm.Lock()
mm.ApprovedDeleteMember(contact.email, 'satchmo.newsletter', admin_notify, userack)
mm.Save()
finally:
mm.Unlock()
def _get_maillist(listname):
try:
if not listname:
listname = config_value('NEWSLETTER', 'NEWSLETTER_NAME')
if listname == "":
log.warn("NEWSLETTER_NAME not set in store settings")
raise NameError('No NEWSLETTER_NAME in settings')
return MailList.MailList(listname, lock=0), listname
except Errors.MMUnknownListError:
print >> sys.stderr, "Can't find the MailMan newsletter: %s" % listname
raise NameError('No such newsletter, "%s"' % listname)
|
test
|
Runs an object detection test configuration
This runs an object detection test configuration. This involves
1. Download and build a model architecture (or use cached).
2. Optimize the model architecture
3. Benchmark the optimized model against a dataset
4. (optional) Run assertions to check the benchmark output
The input to this function is a JSON file which specifies the test
configuration.
example_test_config.json:
{
"model_config": { ... },
"optimization_config": { ... },
"benchmark_config": { ... },
"assertions": [ ... ]
}
model_config: A dictionary of arguments passed to build_model, which
specify the pre-optimized model architecture. The model will be passed
to optimize_model.
optimization_config: A dictionary of arguments passed to optimize_model.
Please see help(optimize_model) for more details.
benchmark_config: A dictionary of arguments passed to benchmark_model.
Please see help(benchmark_model) for more details.
assertions: A list of strings containing python code that will be
evaluated. If the code returns false, an error will be thrown. These
assertions can reference any variables local to this 'test' function.
Some useful values are
statistics['map']
statistics['avg_latency']
statistics['avg_throughput']
Args
----
test_config_path: A string corresponding to the test configuration
JSON file.
|
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import argparse
import json
from .object_detection import build_model, download_dataset, optimize_model, benchmark_model
# MASKED: test function (lines 23-95)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'test_config_path',
        help='Path of JSON file containing test configuration. Please '
             'see help(tftrt.examples.object_detection.test) for more information')
args=parser.parse_args()
test(args.test_config_path)
|
def test(test_config_path):
"""Runs an object detection test configuration
This runs an object detection test configuration. This involves
1. Download and build a model architecture (or use cached).
    2. Optimize the model architecture
3. Benchmark the optimized model against a dataset
4. (optional) Run assertions to check the benchmark output
The input to this function is a JSON file which specifies the test
configuration.
example_test_config.json:
{
"model_config": { ... },
"optimization_config": { ... },
"benchmark_config": { ... },
"assertions": [ ... ]
}
model_config: A dictionary of arguments passed to build_model, which
        specify the pre-optimized model architecture. The model will be passed
to optimize_model.
optimization_config: A dictionary of arguments passed to optimize_model.
Please see help(optimize_model) for more details.
benchmark_config: A dictionary of arguments passed to benchmark_model.
Please see help(benchmark_model) for more details.
assertions: A list of strings containing python code that will be
evaluated. If the code returns false, an error will be thrown. These
assertions can reference any variables local to this 'test' function.
Some useful values are
statistics['map']
statistics['avg_latency']
statistics['avg_throughput']
Args
----
test_config_path: A string corresponding to the test configuration
JSON file.
"""
    with open(test_config_path, 'r') as f:
test_config = json.load(f)
print(json.dumps(test_config, sort_keys=True, indent=4))
frozen_graph = build_model(
**test_config['model_config'])
# optimize model using source model
frozen_graph = optimize_model(
frozen_graph,
**test_config['optimization_config'])
# benchmark optimized model
statistics = benchmark_model(
frozen_graph=frozen_graph,
**test_config['benchmark_config'])
# print some statistics to command line
    # copy before popping so the assertions below can still see all statistics
    print_statistics = dict(statistics)
    if 'runtimes_ms' in print_statistics:
        print_statistics.pop('runtimes_ms')
print(json.dumps(print_statistics, sort_keys=True, indent=4))
# run assertions
if 'assertions' in test_config:
for a in test_config['assertions']:
if not eval(a):
raise AssertionError('ASSERTION FAILED: %s' % a)
else:
print('ASSERTION PASSED: %s' % a)
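# An illustrative configuration; the inner keys are assumptions about what
# build_model, optimize_model and benchmark_model accept, not a verified schema.
example_config = {
    "model_config": {"model_name": "ssd_mobilenet_v1_coco"},
    "optimization_config": {"use_trt": True, "precision_mode": "FP16"},
    "benchmark_config": {"images_dir": "coco/val2017"},
    "assertions": ["statistics['map'] > 0.27"]
}
with open('example_test_config.json', 'w') as f:
    json.dump(example_config, f, indent=4)
test('example_test_config.json')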
| 23
| 95
|
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import argparse
import json
from .object_detection import build_model, download_dataset, optimize_model, benchmark_model
def test(test_config_path):
"""Runs an object detection test configuration
This runs an object detection test configuration. This involves
1. Download and build a model architecture (or use cached).
    2. Optimize the model architecture
3. Benchmark the optimized model against a dataset
4. (optional) Run assertions to check the benchmark output
The input to this function is a JSON file which specifies the test
configuration.
example_test_config.json:
{
"model_config": { ... },
"optimization_config": { ... },
"benchmark_config": { ... },
"assertions": [ ... ]
}
model_config: A dictionary of arguments passed to build_model, which
        specify the pre-optimized model architecture. The model will be passed
to optimize_model.
optimization_config: A dictionary of arguments passed to optimize_model.
Please see help(optimize_model) for more details.
benchmark_config: A dictionary of arguments passed to benchmark_model.
Please see help(benchmark_model) for more details.
assertions: A list of strings containing python code that will be
evaluated. If the code returns false, an error will be thrown. These
assertions can reference any variables local to this 'test' function.
Some useful values are
statistics['map']
statistics['avg_latency']
statistics['avg_throughput']
Args
----
test_config_path: A string corresponding to the test configuration
JSON file.
"""
    with open(test_config_path, 'r') as f:
test_config = json.load(f)
print(json.dumps(test_config, sort_keys=True, indent=4))
frozen_graph = build_model(
**test_config['model_config'])
# optimize model using source model
frozen_graph = optimize_model(
frozen_graph,
**test_config['optimization_config'])
# benchmark optimized model
statistics = benchmark_model(
frozen_graph=frozen_graph,
**test_config['benchmark_config'])
# print some statistics to command line
    # copy before popping so the assertions below can still see all statistics
    print_statistics = dict(statistics)
    if 'runtimes_ms' in print_statistics:
        print_statistics.pop('runtimes_ms')
print(json.dumps(print_statistics, sort_keys=True, indent=4))
# run assertions
if 'assertions' in test_config:
for a in test_config['assertions']:
if not eval(a):
raise AssertionError('ASSERTION FAILED: %s' % a)
else:
print('ASSERTION PASSED: %s' % a)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'test_config_path',
        help='Path of JSON file containing test configuration. Please '
             'see help(tftrt.examples.object_detection.test) for more information')
args=parser.parse_args()
test(args.test_config_path)
|
write_input
|
Get the string representation of the main input file and write it.
Also writes the data file if the lammps_data attribute is set.
Args:
input_filename (string): name of the input file
data_filename (string): override the data file name with this
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals, absolute_import
"""
This module implements classes for reading and generating LAMMPS input sets.
For ease of management we divide the LAMMPS input into 2 files:
1. Data file: All structure-related settings such as the atomic positions,
    bonds, angles, dihedrals, corresponding parametrizations etc. are
    set in the data file.
2. Control/input file: This is the main input file that should be fed to the
    lammps binary. The main input file consists of the path to the
    aforementioned data file and the job control parameters such as
    the ensemble type (NVT, NPT etc.), max number of iterations etc.
"""
import os
import six
from monty.json import MSONable, MontyDecoder
from pymatgen.io.lammps.data import LammpsData
from pymatgen.io.lammps.input import LammpsInput
__author__ = "Kiran Mathew"
__email__ = "kmathew@lbl.gov"
class LammpsInputSet(MSONable):
def __init__(self, name, lammps_input, lammps_data=None,
data_filename="in.data", user_lammps_settings=None):
"""
        Implementation of LammpsInputSet that is initialized from a dict of
        settings. It is typically used by other LammpsInputSets for
initialization from json or yaml source files.
Args:
name (str): A name for the input set.
lammps_input (LammpsInput): The config dictionary to use.
lammps_data (LammpsData): LammpsData object
            data_filename (str): name of the LAMMPS data file.
Note: this will override the value for 'data_file' key in lammps_input
user_lammps_settings (dict): User lammps settings. This allows a user
to override lammps settings, e.g., setting a different force field
or bond type.
"""
self.name = name
self.lines = []
self.lammps_input = lammps_input
self.lammps_data = lammps_data
self.data_filename = data_filename
self.lammps_input.settings["data_file"] = data_filename
self.user_lammps_settings = user_lammps_settings or {}
self.lammps_input.settings.update(self.user_lammps_settings)
# MASKED: write_input function (lines 62-79)
@classmethod
def from_file(cls, name, input_template, user_settings,
lammps_data=None, data_filename="in.data"):
"""
Returns LammpsInputSet from input file template and input data.
Args:
name (str)
input_template (string): path to the input template file.
user_settings (dict): User lammps settings, the keys must
correspond to the keys in the template.
lammps_data (string/LammpsData): path to the
data file or an appropriate object
            data_filename (string): name of the LAMMPS data file.
Returns:
LammpsInputSet
"""
user_settings["data_file"] = data_filename
lammps_input = LammpsInput.from_file(input_template, user_settings)
if isinstance(lammps_data, six.string_types):
lammps_data = LammpsData.from_file(lammps_data)
return cls(name, lammps_input, lammps_data=lammps_data,
data_filename=data_filename)
def as_dict(self):
d = MSONable.as_dict(self)
if hasattr(self, "kwargs"):
d.update(**self.kwargs)
d["lammps_input"] = self.lammps_input.as_dict()
return d
@classmethod
def from_dict(cls, d):
decoded = {k: MontyDecoder().process_decoded(v) for k, v in d.items()
if k not in ["@module", "@class", "lammps_input"]}
decoded["lammps_input"] = LammpsInput.from_dict(d["lammps_input"])
return cls(**decoded)
|
def write_input(self, input_filename, data_filename=None):
"""
Get the string representation of the main input file and write it.
Also writes the data file if the lammps_data attribute is set.
Args:
input_filename (string): name of the input file
data_filename (string): override the data file name with this
"""
if data_filename:
data_filename = os.path.abspath(os.path.join(os.getcwd(), data_filename))
if data_filename and ("data_file" in self.lammps_input.settings):
self.lammps_input.settings["data_file"] = data_filename
self.data_filename = data_filename
self.lammps_input.write_file(input_filename)
# write the data file if present
if self.lammps_data:
self.lammps_data.write_file(filename=self.data_filename)
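# Illustrative usage; 'in.template' and the settings keys are assumptions about
# the user's own template file, not fixed by this module.
lammps_set = LammpsInputSet.from_file(
    "nvt-run", "in.template",
    user_settings={"temperature": 300, "nsteps": 10000},
    lammps_data="in.data")
lammps_set.write_input("in.nvt", data_filename="nvt.data")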
| 62
| 79
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals, absolute_import
"""
This module implements classes for reading and generating LAMMPS input sets.
For ease of management we divide the LAMMPS input into 2 files:
1. Data file: All structure-related settings such as the atomic positions,
    bonds, angles, dihedrals, corresponding parametrizations etc. are
    set in the data file.
2. Control/input file: This is the main input file that should be fed to the
    lammps binary. The main input file consists of the path to the
    aforementioned data file and the job control parameters such as
    the ensemble type (NVT, NPT etc.), max number of iterations etc.
"""
import os
import six
from monty.json import MSONable, MontyDecoder
from pymatgen.io.lammps.data import LammpsData
from pymatgen.io.lammps.input import LammpsInput
__author__ = "Kiran Mathew"
__email__ = "kmathew@lbl.gov"
class LammpsInputSet(MSONable):
def __init__(self, name, lammps_input, lammps_data=None,
data_filename="in.data", user_lammps_settings=None):
"""
        Implementation of LammpsInputSet that is initialized from a dict of
        settings. It is typically used by other LammpsInputSets for
initialization from json or yaml source files.
Args:
name (str): A name for the input set.
lammps_input (LammpsInput): The config dictionary to use.
lammps_data (LammpsData): LammpsData object
            data_filename (str): name of the LAMMPS data file.
Note: this will override the value for 'data_file' key in lammps_input
user_lammps_settings (dict): User lammps settings. This allows a user
to override lammps settings, e.g., setting a different force field
or bond type.
"""
self.name = name
self.lines = []
self.lammps_input = lammps_input
self.lammps_data = lammps_data
self.data_filename = data_filename
self.lammps_input.settings["data_file"] = data_filename
self.user_lammps_settings = user_lammps_settings or {}
self.lammps_input.settings.update(self.user_lammps_settings)
def write_input(self, input_filename, data_filename=None):
"""
Get the string representation of the main input file and write it.
Also writes the data file if the lammps_data attribute is set.
Args:
input_filename (string): name of the input file
data_filename (string): override the data file name with this
"""
if data_filename:
data_filename = os.path.abspath(os.path.join(os.getcwd(), data_filename))
if data_filename and ("data_file" in self.lammps_input.settings):
self.lammps_input.settings["data_file"] = data_filename
self.data_filename = data_filename
self.lammps_input.write_file(input_filename)
# write the data file if present
if self.lammps_data:
self.lammps_data.write_file(filename=self.data_filename)
@classmethod
def from_file(cls, name, input_template, user_settings,
lammps_data=None, data_filename="in.data"):
"""
Returns LammpsInputSet from input file template and input data.
Args:
name (str)
input_template (string): path to the input template file.
user_settings (dict): User lammps settings, the keys must
correspond to the keys in the template.
lammps_data (string/LammpsData): path to the
data file or an appropriate object
            data_filename (string): name of the LAMMPS data file.
Returns:
LammpsInputSet
"""
user_settings["data_file"] = data_filename
lammps_input = LammpsInput.from_file(input_template, user_settings)
if isinstance(lammps_data, six.string_types):
lammps_data = LammpsData.from_file(lammps_data)
return cls(name, lammps_input, lammps_data=lammps_data,
data_filename=data_filename)
def as_dict(self):
d = MSONable.as_dict(self)
if hasattr(self, "kwargs"):
d.update(**self.kwargs)
d["lammps_input"] = self.lammps_input.as_dict()
return d
@classmethod
def from_dict(cls, d):
decoded = {k: MontyDecoder().process_decoded(v) for k, v in d.items()
if k not in ["@module", "@class", "lammps_input"]}
decoded["lammps_input"] = LammpsInput.from_dict(d["lammps_input"])
return cls(**decoded)
|
transforms
|
:param item: sample = deepcopy(self.items[index])
:param cfg: cfg
:return: the transformed item
eval() turns each configured transform-name string into a call; the listed transform methods are applied in turn.
|
from .hflip import hflip
from .resize import resize
from .pad import pad
from .random_crop import random_crop
from .to_tensor import to_tensor
from .random_erasing import random_erasing
from .random_sized_rect_crop import random_sized_rect_crop
# MASKED: transforms function (lines 10-30)
|
def transforms(item, cfg, mode):
"""
    :param item: sample = deepcopy(self.items[index])
    :param cfg: cfg
    :return: the transformed item
    eval() turns each configured transform-name string into a call; the listed transform methods are applied in turn.
"""
transforms_dataset_factory = {
'train': cfg.dataset.train,
'test': cfg.dataset.test
}
if transforms_dataset_factory[mode].before_to_tensor_transform_list is not None:
for t in transforms_dataset_factory[mode].before_to_tensor_transform_list:
item = eval('{}(item, cfg)'.format(t))
item = to_tensor(item, cfg)
if transforms_dataset_factory[mode].after_to_tensor_transform_list is not None:
for t in transforms_dataset_factory[mode].after_to_tensor_transform_list:
item = eval('{}(item, cfg)'.format(t))
return item
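# Sketch of the cfg contract this function assumes: each list entry names one
# of the imported transform functions, applied in order around to_tensor.
cfg.dataset.train.before_to_tensor_transform_list = ['resize', 'pad', 'random_crop', 'hflip']
cfg.dataset.train.after_to_tensor_transform_list = ['random_erasing']
item = transforms(item, cfg, mode='train')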
| 10
| 30
|
from .hflip import hflip
from .resize import resize
from .pad import pad
from .random_crop import random_crop
from .to_tensor import to_tensor
from .random_erasing import random_erasing
from .random_sized_rect_crop import random_sized_rect_crop
def transforms(item, cfg, mode):
"""
    :param item: sample = deepcopy(self.items[index])
    :param cfg: cfg
    :return: the transformed item
    eval() turns each configured transform-name string into a call; the listed transform methods are applied in turn.
"""
transforms_dataset_factory = {
'train': cfg.dataset.train,
'test': cfg.dataset.test
}
if transforms_dataset_factory[mode].before_to_tensor_transform_list is not None:
for t in transforms_dataset_factory[mode].before_to_tensor_transform_list:
item = eval('{}(item, cfg)'.format(t))
item = to_tensor(item, cfg)
if transforms_dataset_factory[mode].after_to_tensor_transform_list is not None:
for t in transforms_dataset_factory[mode].after_to_tensor_transform_list:
item = eval('{}(item, cfg)'.format(t))
return item
|
load_image_from_file
|
Given a filename, try to open the file. If that fails, return None.
Args:
filename: location of the image file
shape: the shape of the image file to be scaled
Returns:
the image if it succeeds, None if it fails.
Raises:
an exception if the image was not the right shape.
|
"""
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
""" Activation generator helper classes for TCAV"""
from abc import ABCMeta
from abc import abstractmethod
from multiprocessing import dummy as multiprocessing
import os.path
import numpy as np
import PIL.Image
import tensorflow as tf
class ActivationGeneratorInterface(object):
"""Interface for an activation generator for a model"""
__metaclass__ = ABCMeta
@abstractmethod
def process_and_load_activations(self, bottleneck_names, concepts):
pass
@abstractmethod
  def get_model(self):
pass
class ActivationGeneratorBase(ActivationGeneratorInterface):
"""Basic abstract activation generator for a model"""
def __init__(self, model, acts_dir, max_examples=500):
self.model = model
self.acts_dir = acts_dir
self.max_examples = max_examples
def get_model(self):
return self.model
@abstractmethod
def get_examples_for_concept(self, concept):
pass
def get_activations_for_concept(self, concept, bottleneck):
examples = self.get_examples_for_concept(concept)
return self.get_activations_for_examples(examples, bottleneck)
def get_activations_for_examples(self, examples, bottleneck):
acts = self.model.run_examples(examples, bottleneck)
return self.model.reshape_activations(acts).squeeze()
def process_and_load_activations(self, bottleneck_names, concepts):
acts = {}
if self.acts_dir and not tf.gfile.Exists(self.acts_dir):
tf.gfile.MakeDirs(self.acts_dir)
for concept in concepts:
if concept not in acts:
acts[concept] = {}
for bottleneck_name in bottleneck_names:
acts_path = os.path.join(self.acts_dir, 'acts_{}_{}'.format(
concept, bottleneck_name)) if self.acts_dir else None
if acts_path and tf.gfile.Exists(acts_path):
with tf.gfile.Open(acts_path, 'rb') as f:
acts[concept][bottleneck_name] = np.load(f).squeeze()
tf.logging.info('Loaded {} shape {}'.format(
acts_path, acts[concept][bottleneck_name].shape))
else:
acts[concept][bottleneck_name] = self.get_activations_for_concept(
concept, bottleneck_name)
if acts_path:
tf.logging.info('{} does not exist, Making one...'.format(
acts_path))
with tf.gfile.Open(acts_path, 'w') as f:
np.save(f, acts[concept][bottleneck_name], allow_pickle=False)
return acts
class ImageActivationGenerator(ActivationGeneratorBase):
"""Activation generator for a basic image model"""
def __init__(self, model, source_dir, acts_dir, max_examples=10):
self.source_dir = source_dir
super(ImageActivationGenerator, self).__init__(
model, acts_dir, max_examples)
def get_examples_for_concept(self, concept):
concept_dir = os.path.join(self.source_dir, concept)
img_paths = [os.path.join(concept_dir, d)
for d in tf.gfile.ListDirectory(concept_dir)]
imgs = self.load_images_from_files(img_paths, self.max_examples,
shape=self.model.get_image_shape()[:2])
return imgs
# MASKED: load_image_from_file function (lines 107-137)
def load_images_from_files(self, filenames, max_imgs=500,
do_shuffle=True, run_parallel=True,
shape=(299, 299),
num_workers=100):
"""Return image arrays from filenames.
Args:
filenames: locations of image files.
max_imgs: maximum number of images from filenames.
do_shuffle: before getting max_imgs files, shuffle the names or not
run_parallel: get images in parallel or not
shape: desired shape of the image
num_workers: number of workers in parallelization.
Returns:
image arrays
"""
imgs = []
# First shuffle a copy of the filenames.
filenames = filenames[:]
if do_shuffle:
np.random.shuffle(filenames)
if run_parallel:
pool = multiprocessing.Pool(num_workers)
imgs = pool.map(
lambda filename: self.load_image_from_file(filename, shape),
filenames[:max_imgs])
imgs = [img for img in imgs if img is not None]
if len(imgs) <= 1:
raise ValueError('You must have more than 1 image in each class to run TCAV.')
else:
for filename in filenames:
img = self.load_image_from_file(filename, shape)
if img is not None:
imgs.append(img)
if len(imgs) <= 1:
raise ValueError('You must have more than 1 image in each class to run TCAV.')
elif len(imgs) >= max_imgs:
break
return np.array(imgs)
|
def load_image_from_file(self, filename, shape):
"""Given a filename, try to open the file. If failed, return None.
Args:
filename: location of the image file
shape: the shape of the image file to be scaled
Returns:
the image if succeeds, None if fails.
Rasies:
exception if the image was not the right shape.
"""
if not tf.gfile.Exists(filename):
tf.logging.error('Cannot find file: {}'.format(filename))
return None
try:
# ensure image has no transparency channel
img = np.array(PIL.Image.open(tf.gfile.Open(filename, 'rb')).convert(
'RGB').resize(shape, PIL.Image.BILINEAR))
# Normalize pixel values to between 0 and 1.
img = np.float32(img) / 255.0
if not (len(img.shape) == 3 and img.shape[2] == 3):
return None
else:
return img
except Exception as e:
tf.logging.info(e)
return None
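# Illustrative call, given an ImageActivationGenerator instance 'gen'; the path
# is a placeholder. Returned arrays are float32 RGB scaled to [0, 1].
img = gen.load_image_from_file('concepts/zebra/img1.jpg', shape=(299, 299))
if img is not None:
    print(img.shape)  # (299, 299, 3)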
| 107
| 137
|
"""
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
""" Activation generator helper classes for TCAV"""
from abc import ABCMeta
from abc import abstractmethod
from multiprocessing import dummy as multiprocessing
import os.path
import numpy as np
import PIL.Image
import tensorflow as tf
class ActivationGeneratorInterface(object):
"""Interface for an activation generator for a model"""
__metaclass__ = ABCMeta
@abstractmethod
def process_and_load_activations(self, bottleneck_names, concepts):
pass
@abstractmethod
  def get_model(self):
pass
class ActivationGeneratorBase(ActivationGeneratorInterface):
"""Basic abstract activation generator for a model"""
def __init__(self, model, acts_dir, max_examples=500):
self.model = model
self.acts_dir = acts_dir
self.max_examples = max_examples
def get_model(self):
return self.model
@abstractmethod
def get_examples_for_concept(self, concept):
pass
def get_activations_for_concept(self, concept, bottleneck):
examples = self.get_examples_for_concept(concept)
return self.get_activations_for_examples(examples, bottleneck)
def get_activations_for_examples(self, examples, bottleneck):
acts = self.model.run_examples(examples, bottleneck)
return self.model.reshape_activations(acts).squeeze()
def process_and_load_activations(self, bottleneck_names, concepts):
acts = {}
if self.acts_dir and not tf.gfile.Exists(self.acts_dir):
tf.gfile.MakeDirs(self.acts_dir)
for concept in concepts:
if concept not in acts:
acts[concept] = {}
for bottleneck_name in bottleneck_names:
acts_path = os.path.join(self.acts_dir, 'acts_{}_{}'.format(
concept, bottleneck_name)) if self.acts_dir else None
if acts_path and tf.gfile.Exists(acts_path):
with tf.gfile.Open(acts_path, 'rb') as f:
acts[concept][bottleneck_name] = np.load(f).squeeze()
tf.logging.info('Loaded {} shape {}'.format(
acts_path, acts[concept][bottleneck_name].shape))
else:
acts[concept][bottleneck_name] = self.get_activations_for_concept(
concept, bottleneck_name)
if acts_path:
tf.logging.info('{} does not exist, Making one...'.format(
acts_path))
with tf.gfile.Open(acts_path, 'w') as f:
np.save(f, acts[concept][bottleneck_name], allow_pickle=False)
return acts
class ImageActivationGenerator(ActivationGeneratorBase):
"""Activation generator for a basic image model"""
def __init__(self, model, source_dir, acts_dir, max_examples=10):
self.source_dir = source_dir
super(ImageActivationGenerator, self).__init__(
model, acts_dir, max_examples)
def get_examples_for_concept(self, concept):
concept_dir = os.path.join(self.source_dir, concept)
img_paths = [os.path.join(concept_dir, d)
for d in tf.gfile.ListDirectory(concept_dir)]
imgs = self.load_images_from_files(img_paths, self.max_examples,
shape=self.model.get_image_shape()[:2])
return imgs
def load_image_from_file(self, filename, shape):
"""Given a filename, try to open the file. If failed, return None.
Args:
filename: location of the image file
shape: the shape of the image file to be scaled
Returns:
the image if succeeds, None if fails.
Rasies:
exception if the image was not the right shape.
"""
if not tf.gfile.Exists(filename):
tf.logging.error('Cannot find file: {}'.format(filename))
return None
try:
# ensure image has no transparency channel
img = np.array(PIL.Image.open(tf.gfile.Open(filename, 'rb')).convert(
'RGB').resize(shape, PIL.Image.BILINEAR))
# Normalize pixel values to between 0 and 1.
img = np.float32(img) / 255.0
if not (len(img.shape) == 3 and img.shape[2] == 3):
return None
else:
return img
except Exception as e:
tf.logging.info(e)
return None
def load_images_from_files(self, filenames, max_imgs=500,
do_shuffle=True, run_parallel=True,
shape=(299, 299),
num_workers=100):
"""Return image arrays from filenames.
Args:
filenames: locations of image files.
max_imgs: maximum number of images from filenames.
do_shuffle: before getting max_imgs files, shuffle the names or not
run_parallel: get images in parallel or not
shape: desired shape of the image
num_workers: number of workers in parallelization.
Returns:
image arrays
"""
imgs = []
# First shuffle a copy of the filenames.
filenames = filenames[:]
if do_shuffle:
np.random.shuffle(filenames)
if run_parallel:
# A lambda over a bound method cannot be pickled, so use a thread pool
# (multiprocessing.dummy) rather than a process-based multiprocessing.Pool.
from multiprocessing import dummy as multiprocessing_dummy
pool = multiprocessing_dummy.Pool(num_workers)
imgs = pool.map(
lambda filename: self.load_image_from_file(filename, shape),
filenames[:max_imgs])
imgs = [img for img in imgs if img is not None]
if len(imgs) <= 1:
raise ValueError('You must have more than 1 image in each class to run TCAV.')
else:
for filename in filenames:
img = self.load_image_from_file(filename, shape)
if img is not None:
imgs.append(img)
if len(imgs) <= 1:
raise ValueError('You must have more than 1 image in each class to run TCAV.')
elif len(imgs) >= max_imgs:
break
return np.array(imgs)
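# --- Example (not from the source): a minimal usage sketch, assuming a
# hypothetical `MyModel` wrapper that implements run_examples(),
# reshape_activations() and get_image_shape(), and a source_dir laid out
# as one sub-directory of images per concept.
#
# gen = ImageActivationGenerator(model=MyModel(),
#                                source_dir='/tmp/concepts',
#                                acts_dir='/tmp/acts',
#                                max_examples=50)
# acts = gen.process_and_load_activations(['mixed4c'], ['striped', 'random'])
# # acts['striped']['mixed4c'] holds the (possibly cached) activations.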
|
get
|
Get the resulting command-line options.
Decode and merge the specified command-line options with the defaults.
Returns
-------
dict
Decoded and verified config value.
|
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Module houses Modin configs originated from environment variables."""
import os
import sys
from textwrap import dedent
import warnings
from packaging import version
import secrets
from .pubsub import Parameter, _TYPE_PARAMS, ExactStr, ValueSource
class EnvironmentVariable(Parameter, type=str, abstract=True):
"""Base class for environment variables-based configuration."""
varname: str = None
@classmethod
def _get_raw_from_config(cls) -> str:
"""
Read the value from environment variable.
Returns
-------
str
Config raw value.
Raises
------
KeyError
If value is absent.
"""
return os.environ[cls.varname]
@classmethod
def get_help(cls) -> str:
"""
Generate user-presentable help for the config.
Returns
-------
str
"""
help = f"{cls.varname}: {dedent(cls.__doc__ or 'Unknown').strip()}\n\tProvide {_TYPE_PARAMS[cls.type].help}"
if cls.choices:
help += f" (valid examples are: {', '.join(str(c) for c in cls.choices)})"
return help
class IsDebug(EnvironmentVariable, type=bool):
"""Force Modin engine to be "Python" unless specified by $MODIN_ENGINE."""
varname = "MODIN_DEBUG"
class Engine(EnvironmentVariable, type=str):
"""Distribution engine to run queries by."""
varname = "MODIN_ENGINE"
choices = ("Ray", "Dask", "Python", "Native")
@classmethod
def _get_default(cls):
"""
Get default value of the config.
Returns
-------
str
"""
if IsDebug.get():
return "Python"
try:
import ray
except ImportError:
pass
else:
if version.parse(ray.__version__) < version.parse("1.4.0"):
raise ImportError(
"Please `pip install modin[ray]` to install compatible Ray version."
)
return "Ray"
try:
import dask
import distributed
except ImportError:
pass
else:
if version.parse(dask.__version__) < version.parse(
"2.22.0"
) or version.parse(distributed.__version__) < version.parse("2.22.0"):
raise ImportError(
"Please `pip install modin[dask]` to install compatible Dask version."
)
return "Dask"
try:
import omniscidbe # noqa
except ImportError:
try:
import dbe # noqa
except ImportError:
pass
else:
return "Native"
else:
return "Native"
raise ImportError(
"Please refer to installation documentation page to install an engine"
)
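# A minimal sketch (not part of this module) of pinning the engine up front
# instead of relying on the fallback chain above; it assumes Modin and the
# chosen engine are installed:
#
# import os
# os.environ["MODIN_ENGINE"] = "Dask"  # must be set before importing Modin
# import modin.pandas as pd            # queries now run on Dask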
class Backend(EnvironmentVariable, type=str):
"""Engine to run on a single node of distribution."""
varname = "MODIN_BACKEND"
default = "Pandas"
choices = ("Pandas", "OmniSci", "Pyarrow", "Cudf")
class IsExperimental(EnvironmentVariable, type=bool):
"""Whether to Turn on experimental features."""
varname = "MODIN_EXPERIMENTAL"
class IsRayCluster(EnvironmentVariable, type=bool):
"""Whether Modin is running on pre-initialized Ray cluster."""
varname = "MODIN_RAY_CLUSTER"
class RayRedisAddress(EnvironmentVariable, type=ExactStr):
"""Redis address to connect to when running in Ray cluster."""
varname = "MODIN_REDIS_ADDRESS"
class RayRedisPassword(EnvironmentVariable, type=ExactStr):
"""What password to use for connecting to Redis."""
varname = "MODIN_REDIS_PASSWORD"
default = secrets.token_hex(32)
class CpuCount(EnvironmentVariable, type=int):
"""How many CPU cores to use during initialization of the Modin engine."""
varname = "MODIN_CPUS"
@classmethod
def _get_default(cls):
"""
Get default value of the config.
Returns
-------
int
"""
import multiprocessing
return multiprocessing.cpu_count()
class GpuCount(EnvironmentVariable, type=int):
"""How may GPU devices to utilize across the whole distribution."""
varname = "MODIN_GPUS"
class Memory(EnvironmentVariable, type=int):
"""
How much memory (in bytes) to give to an execution engine.
Notes
-----
* In Ray case: the amount of memory to start the Plasma object store with.
* In Dask case: the amount of memory that is given to each worker depending on CPUs used.
"""
varname = "MODIN_MEMORY"
class NPartitions(EnvironmentVariable, type=int):
"""How many partitions to use for a Modin DataFrame (along each axis)."""
varname = "MODIN_NPARTITIONS"
@classmethod
def _put(cls, value):
"""
Put specific value if NPartitions wasn't set by a user yet.
Parameters
----------
value : int
Config value to set.
Notes
-----
This method is used to set NPartitions from cluster resources internally
and should not be called by a user.
"""
if cls.get_value_source() == ValueSource.DEFAULT:
cls.put(value)
@classmethod
def _get_default(cls):
"""
Get default value of the config.
Returns
-------
int
"""
if Backend.get() == "Cudf":
return GpuCount.get()
else:
return CpuCount.get()
class SocksProxy(EnvironmentVariable, type=ExactStr):
"""SOCKS proxy address if it is needed for SSH to work."""
varname = "MODIN_SOCKS_PROXY"
class DoLogRpyc(EnvironmentVariable, type=bool):
"""Whether to gather RPyC logs (applicable for remote context)."""
varname = "MODIN_LOG_RPYC"
class DoTraceRpyc(EnvironmentVariable, type=bool):
"""Whether to trace RPyC calls (applicable for remote context)."""
varname = "MODIN_TRACE_RPYC"
class OmnisciFragmentSize(EnvironmentVariable, type=int):
"""How big a fragment in OmniSci should be when creating a table (in rows)."""
varname = "MODIN_OMNISCI_FRAGMENT_SIZE"
class DoUseCalcite(EnvironmentVariable, type=bool):
"""Whether to use Calcite for OmniSci queries execution."""
varname = "MODIN_USE_CALCITE"
default = True
class TestDatasetSize(EnvironmentVariable, type=str):
"""Dataset size for running some tests."""
varname = "MODIN_TEST_DATASET_SIZE"
choices = ("Small", "Normal", "Big")
class TestRayClient(EnvironmentVariable, type=bool):
"""Set to true to start and connect Ray client before a testing session starts."""
varname = "MODIN_TEST_RAY_CLIENT"
default = False
class TrackFileLeaks(EnvironmentVariable, type=bool):
"""Whether to track for open file handles leakage during testing."""
varname = "MODIN_TEST_TRACK_FILE_LEAKS"
# Turn off tracking on Windows by default because
# psutil's open_files() can be extremely slow on Windows (up to adding a few hours).
# see https://github.com/giampaolo/psutil/pull/597
default = sys.platform != "win32"
class AsvImplementation(EnvironmentVariable, type=ExactStr):
"""Allows to select a library that we will use for testing performance."""
varname = "MODIN_ASV_USE_IMPL"
choices = ("modin", "pandas")
default = "modin"
class AsvDataSizeConfig(EnvironmentVariable, type=ExactStr):
"""Allows to override default size of data (shapes)."""
varname = "MODIN_ASV_DATASIZE_CONFIG"
default = None
class ProgressBar(EnvironmentVariable, type=bool):
"""Whether or not to show the progress bar."""
varname = "MODIN_PROGRESS_BAR"
default = False
@classmethod
def enable(cls):
"""Enable ``ProgressBar`` feature."""
cls.put(True)
@classmethod
def disable(cls):
"""Disable ``ProgressBar`` feature."""
cls.put(False)
@classmethod
def put(cls, value):
"""
Set ``ProgressBar`` value only if synchronous benchmarking is disabled.
Parameters
----------
value : bool
Config value to set.
"""
if value and BenchmarkMode.get():
raise ValueError("ProgressBar isn't compatible with BenchmarkMode")
super().put(value)
class BenchmarkMode(EnvironmentVariable, type=bool):
"""Whether or not to perform computations synchronously."""
varname = "MODIN_BENCHMARK_MODE"
default = False
@classmethod
def put(cls, value):
"""
Set ``BenchmarkMode`` value only if progress bar feature is disabled.
Parameters
----------
value : bool
Config value to set.
"""
if value and ProgressBar.get():
raise ValueError("BenchmarkMode isn't compatible with ProgressBar")
super().put(value)
class PersistentPickle(EnvironmentVariable, type=bool):
"""Wheather serialization should be persistent."""
varname = "MODIN_PERSISTENT_PICKLE"
# When set to off, it allows faster serialization which is only
# valid in current run (i.e. useless for saving to disk).
# When set to on, Modin objects could be saved to disk and loaded
# but serialization/deserialization could take more time.
default = False
class OmnisciLaunchParameters(EnvironmentVariable, type=dict):
"""
Additional command line options for the OmniSci engine.
Please visit OmniSci documentation for the description of available parameters:
https://docs.omnisci.com/installation-and-configuration/config-parameters#configuration-parameters-for-omniscidb
"""
varname = "MODIN_OMNISCI_LAUNCH_PARAMETERS"
default = {
"enable_union": 1,
"enable_columnar_output": 1,
"enable_lazy_fetch": 0,
"null_div_by_zero": 1,
"enable_watchdog": 0,
}
# MASKED: get function (lines 387-404)
def _check_vars():
"""
Check validity of environment variables.
Look out for any unknown environment variables that start with the
"MODIN_" prefix - they might be typos, so warn the user.
"""
valid_names = {
obj.varname
for obj in globals().values()
if isinstance(obj, type)
and issubclass(obj, EnvironmentVariable)
and not obj.is_abstract
}
found_names = {name for name in os.environ if name.startswith("MODIN_")}
unknown = found_names - valid_names
if unknown:
warnings.warn(
f"Found unknown environment variable{'s' if len(unknown) > 1 else ''},"
f" please check {'their' if len(unknown) > 1 else 'its'} spelling: "
+ ", ".join(sorted(unknown))
)
_check_vars()
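# A minimal sketch of how a new config would be declared with this machinery;
# the class and variable names are hypothetical, not part of Modin:
#
# class MyTimeout(EnvironmentVariable, type=int):
#     """Timeout (in seconds) for some hypothetical operation."""
#     varname = "MODIN_MY_TIMEOUT"
#     default = 30
#
# MyTimeout.get()        # reads $MODIN_MY_TIMEOUT, falling back to 30
# print(MyTimeout.get_help())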
|
@classmethod
def get(cls):
"""
Get the resulting command-line options.
Decode and merge the specified command-line options with the defaults.
Returns
-------
dict
Decoded and verified config value.
"""
custom_parameters = super().get()
result = cls.default.copy()
result.update(
{key.replace("-", "_"): value for key, value in custom_parameters.items()}
)
return result
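# Merge behavior, as a minimal sketch. Assuming the raw dict value is encoded
# as comma-separated key=value pairs (the exact encoding is defined in
# .pubsub), defaults are kept unless overridden and dashes in user-supplied
# keys are normalized to underscores:
#
# os.environ["MODIN_OMNISCI_LAUNCH_PARAMETERS"] = "enable-union=0"
# OmnisciLaunchParameters.get()
# # -> {'enable_union': 0, 'enable_columnar_output': 1, 'enable_lazy_fetch': 0,
# #     'null_div_by_zero': 1, 'enable_watchdog': 0}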
| 387
| 404
|
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Module houses Modin configs originated from environment variables."""
import os
import sys
from textwrap import dedent
import warnings
from packaging import version
import secrets
from .pubsub import Parameter, _TYPE_PARAMS, ExactStr, ValueSource
class EnvironmentVariable(Parameter, type=str, abstract=True):
"""Base class for environment variables-based configuration."""
varname: str = None
@classmethod
def _get_raw_from_config(cls) -> str:
"""
Read the value from environment variable.
Returns
-------
str
Config raw value.
Raises
------
KeyError
If value is absent.
"""
return os.environ[cls.varname]
@classmethod
def get_help(cls) -> str:
"""
Generate user-presentable help for the config.
Returns
-------
str
"""
help = f"{cls.varname}: {dedent(cls.__doc__ or 'Unknown').strip()}\n\tProvide {_TYPE_PARAMS[cls.type].help}"
if cls.choices:
help += f" (valid examples are: {', '.join(str(c) for c in cls.choices)})"
return help
class IsDebug(EnvironmentVariable, type=bool):
"""Force Modin engine to be "Python" unless specified by $MODIN_ENGINE."""
varname = "MODIN_DEBUG"
class Engine(EnvironmentVariable, type=str):
"""Distribution engine to run queries by."""
varname = "MODIN_ENGINE"
choices = ("Ray", "Dask", "Python", "Native")
@classmethod
def _get_default(cls):
"""
Get default value of the config.
Returns
-------
str
"""
if IsDebug.get():
return "Python"
try:
import ray
except ImportError:
pass
else:
if version.parse(ray.__version__) < version.parse("1.4.0"):
raise ImportError(
"Please `pip install modin[ray]` to install compatible Ray version."
)
return "Ray"
try:
import dask
import distributed
except ImportError:
pass
else:
if version.parse(dask.__version__) < version.parse(
"2.22.0"
) or version.parse(distributed.__version__) < version.parse("2.22.0"):
raise ImportError(
"Please `pip install modin[dask]` to install compatible Dask version."
)
return "Dask"
try:
import omniscidbe # noqa
except ImportError:
try:
import dbe # noqa
except ImportError:
pass
else:
return "Native"
else:
return "Native"
raise ImportError(
"Please refer to installation documentation page to install an engine"
)
class Backend(EnvironmentVariable, type=str):
"""Engine to run on a single node of distribution."""
varname = "MODIN_BACKEND"
default = "Pandas"
choices = ("Pandas", "OmniSci", "Pyarrow", "Cudf")
class IsExperimental(EnvironmentVariable, type=bool):
"""Whether to Turn on experimental features."""
varname = "MODIN_EXPERIMENTAL"
class IsRayCluster(EnvironmentVariable, type=bool):
"""Whether Modin is running on pre-initialized Ray cluster."""
varname = "MODIN_RAY_CLUSTER"
class RayRedisAddress(EnvironmentVariable, type=ExactStr):
"""Redis address to connect to when running in Ray cluster."""
varname = "MODIN_REDIS_ADDRESS"
class RayRedisPassword(EnvironmentVariable, type=ExactStr):
"""What password to use for connecting to Redis."""
varname = "MODIN_REDIS_PASSWORD"
default = secrets.token_hex(32)
class CpuCount(EnvironmentVariable, type=int):
"""How many CPU cores to use during initialization of the Modin engine."""
varname = "MODIN_CPUS"
@classmethod
def _get_default(cls):
"""
Get default value of the config.
Returns
-------
int
"""
import multiprocessing
return multiprocessing.cpu_count()
class GpuCount(EnvironmentVariable, type=int):
"""How may GPU devices to utilize across the whole distribution."""
varname = "MODIN_GPUS"
class Memory(EnvironmentVariable, type=int):
"""
How much memory (in bytes) to give to an execution engine.
Notes
-----
* In Ray case: the amount of memory to start the Plasma object store with.
* In Dask case: the amount of memory that is given to each worker depending on CPUs used.
"""
varname = "MODIN_MEMORY"
class NPartitions(EnvironmentVariable, type=int):
"""How many partitions to use for a Modin DataFrame (along each axis)."""
varname = "MODIN_NPARTITIONS"
@classmethod
def _put(cls, value):
"""
Put specific value if NPartitions wasn't set by a user yet.
Parameters
----------
value : int
Config value to set.
Notes
-----
This method is used to set NPartitions from cluster resources internally
and should not be called by a user.
"""
if cls.get_value_source() == ValueSource.DEFAULT:
cls.put(value)
@classmethod
def _get_default(cls):
"""
Get default value of the config.
Returns
-------
int
"""
if Backend.get() == "Cudf":
return GpuCount.get()
else:
return CpuCount.get()
class SocksProxy(EnvironmentVariable, type=ExactStr):
"""SOCKS proxy address if it is needed for SSH to work."""
varname = "MODIN_SOCKS_PROXY"
class DoLogRpyc(EnvironmentVariable, type=bool):
"""Whether to gather RPyC logs (applicable for remote context)."""
varname = "MODIN_LOG_RPYC"
class DoTraceRpyc(EnvironmentVariable, type=bool):
"""Whether to trace RPyC calls (applicable for remote context)."""
varname = "MODIN_TRACE_RPYC"
class OmnisciFragmentSize(EnvironmentVariable, type=int):
"""How big a fragment in OmniSci should be when creating a table (in rows)."""
varname = "MODIN_OMNISCI_FRAGMENT_SIZE"
class DoUseCalcite(EnvironmentVariable, type=bool):
"""Whether to use Calcite for OmniSci queries execution."""
varname = "MODIN_USE_CALCITE"
default = True
class TestDatasetSize(EnvironmentVariable, type=str):
"""Dataset size for running some tests."""
varname = "MODIN_TEST_DATASET_SIZE"
choices = ("Small", "Normal", "Big")
class TestRayClient(EnvironmentVariable, type=bool):
"""Set to true to start and connect Ray client before a testing session starts."""
varname = "MODIN_TEST_RAY_CLIENT"
default = False
class TrackFileLeaks(EnvironmentVariable, type=bool):
"""Whether to track for open file handles leakage during testing."""
varname = "MODIN_TEST_TRACK_FILE_LEAKS"
# Turn off tracking on Windows by default because
# psutil's open_files() can be extremely slow on Windows (up to adding a few hours).
# see https://github.com/giampaolo/psutil/pull/597
default = sys.platform != "win32"
class AsvImplementation(EnvironmentVariable, type=ExactStr):
"""Allows to select a library that we will use for testing performance."""
varname = "MODIN_ASV_USE_IMPL"
choices = ("modin", "pandas")
default = "modin"
class AsvDataSizeConfig(EnvironmentVariable, type=ExactStr):
"""Allows to override default size of data (shapes)."""
varname = "MODIN_ASV_DATASIZE_CONFIG"
default = None
class ProgressBar(EnvironmentVariable, type=bool):
"""Whether or not to show the progress bar."""
varname = "MODIN_PROGRESS_BAR"
default = False
@classmethod
def enable(cls):
"""Enable ``ProgressBar`` feature."""
cls.put(True)
@classmethod
def disable(cls):
"""Disable ``ProgressBar`` feature."""
cls.put(False)
@classmethod
def put(cls, value):
"""
Set ``ProgressBar`` value only if synchronous benchmarking is disabled.
Parameters
----------
value : bool
Config value to set.
"""
if value and BenchmarkMode.get():
raise ValueError("ProgressBar isn't compatible with BenchmarkMode")
super().put(value)
class BenchmarkMode(EnvironmentVariable, type=bool):
"""Whether or not to perform computations synchronously."""
varname = "MODIN_BENCHMARK_MODE"
default = False
@classmethod
def put(cls, value):
"""
Set ``BenchmarkMode`` value only if progress bar feature is disabled.
Parameters
----------
value : bool
Config value to set.
"""
if value and ProgressBar.get():
raise ValueError("BenchmarkMode isn't compatible with ProgressBar")
super().put(value)
class PersistentPickle(EnvironmentVariable, type=bool):
"""Wheather serialization should be persistent."""
varname = "MODIN_PERSISTENT_PICKLE"
# When set to off, it allows faster serialization which is only
# valid in current run (i.e. useless for saving to disk).
# When set to on, Modin objects could be saved to disk and loaded
# but serialization/deserialization could take more time.
default = False
class OmnisciLaunchParameters(EnvironmentVariable, type=dict):
"""
Additional command line options for the OmniSci engine.
Please visit OmniSci documentation for the description of available parameters:
https://docs.omnisci.com/installation-and-configuration/config-parameters#configuration-parameters-for-omniscidb
"""
varname = "MODIN_OMNISCI_LAUNCH_PARAMETERS"
default = {
"enable_union": 1,
"enable_columnar_output": 1,
"enable_lazy_fetch": 0,
"null_div_by_zero": 1,
"enable_watchdog": 0,
}
@classmethod
def get(cls):
"""
Get the resulting command-line options.
Decode and merge the specified command-line options with the defaults.
Returns
-------
dict
Decoded and verified config value.
"""
custom_parameters = super().get()
result = cls.default.copy()
result.update(
{key.replace("-", "_"): value for key, value in custom_parameters.items()}
)
return result
def _check_vars():
"""
Check validity of environment variables.
Look out for any unknown environment variables that start with the
"MODIN_" prefix - they might be typos, so warn the user.
"""
valid_names = {
obj.varname
for obj in globals().values()
if isinstance(obj, type)
and issubclass(obj, EnvironmentVariable)
and not obj.is_abstract
}
found_names = {name for name in os.environ if name.startswith("MODIN_")}
unknown = found_names - valid_names
if unknown:
warnings.warn(
f"Found unknown environment variable{'s' if len(unknown) > 1 else ''},"
f" please check {'their' if len(unknown) > 1 else 'its'} spelling: "
+ ", ".join(sorted(unknown))
)
_check_vars()
|
bulk
|
Used by the monitoring features to send monitoring data.
`<https://www.elastic.co/guide/en/elasticsearch/reference/7.10/monitor-elasticsearch-cluster.html>`_
.. warning::
This API is **experimental** so may include breaking changes
or be removed in a future version
:arg body: The operation definition and data (action-data
pairs), separated by newlines
:arg doc_type: Default document type for items which don't
provide one
:arg interval: Collection interval (e.g., '10s' or '10000ms') of
the payload
:arg system_api_version: API Version of the monitored system
:arg system_id: Identifier of the monitored system
|
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH, _bulk_body
class MonitoringClient(NamespacedClient):
# MASKED: bulk function (lines 22-53)
|
@query_params("interval", "system_api_version", "system_id")
def bulk(self, body, doc_type=None, params=None, headers=None):
"""
Used by the monitoring features to send monitoring data.
`<https://www.elastic.co/guide/en/elasticsearch/reference/7.10/monitor-elasticsearch-cluster.html>`_
.. warning::
This API is **experimental** so may include breaking changes
or be removed in a future version
:arg body: The operation definition and data (action-data
pairs), separated by newlines
:arg doc_type: Default document type for items which don't
provide one
:arg interval: Collection interval (e.g., '10s' or '10000ms') of
the payload
:arg system_api_version: API Version of the monitored system
:arg system_id: Identifier of the monitored system
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
body = _bulk_body(self.transport.serializer, body)
return self.transport.perform_request(
"POST",
_make_path("_monitoring", doc_type, "bulk"),
params=params,
headers=headers,
body=body,
)
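# A minimal usage sketch; the host and payload are illustrative, not from the
# source. The body takes NDJSON-style action/data pairs, as with the regular
# bulk API:
#
# from elasticsearch import Elasticsearch
# es = Elasticsearch("http://localhost:9200")
# es.monitoring.bulk(
#     body=[{"index": {"_type": "_doc"}},
#           {"node_stats": {"some_metric": 1}}],
#     system_id="kibana",
#     system_api_version="7",
#     interval="10s")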
| 22
| 53
|
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH, _bulk_body
class MonitoringClient(NamespacedClient):
@query_params("interval", "system_api_version", "system_id")
def bulk(self, body, doc_type=None, params=None, headers=None):
"""
Used by the monitoring features to send monitoring data.
`<https://www.elastic.co/guide/en/elasticsearch/reference/7.10/monitor-elasticsearch-cluster.html>`_
.. warning::
This API is **experimental** so may include breaking changes
or be removed in a future version
:arg body: The operation definition and data (action-data
pairs), separated by newlines
:arg doc_type: Default document type for items which don't
provide one
:arg interval: Collection interval (e.g., '10s' or '10000ms') of
the payload
:arg system_api_version: API Version of the monitored system
:arg system_id: Identifier of the monitored system
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
body = _bulk_body(self.transport.serializer, body)
return self.transport.perform_request(
"POST",
_make_path("_monitoring", doc_type, "bulk"),
params=params,
headers=headers,
body=body,
)
|
__init__
|
Create a new Polar Axes for a polar plot.
The following optional kwargs are supported:
- *resolution*: The number of points of interpolation between
each pair of data points. Set to 1 to disable
interpolation.
|
import math
import warnings
import numpy as np
import matplotlib
rcParams = matplotlib.rcParams
from matplotlib.axes import Axes
import matplotlib.axis as maxis
from matplotlib import cbook
from matplotlib import docstring
from matplotlib.patches import Circle
from matplotlib.path import Path
from matplotlib.ticker import Formatter, Locator, FormatStrFormatter
from matplotlib.transforms import Affine2D, Affine2DBase, Bbox, \
BboxTransformTo, IdentityTransform, Transform, TransformWrapper, \
ScaledTranslation, blended_transform_factory, BboxTransformToMaxOnly
import matplotlib.spines as mspines
class PolarAxes(Axes):
"""
A polar graph projection, where the input dimensions are *theta*, *r*.
Theta starts pointing east and goes anti-clockwise.
"""
name = 'polar'
class PolarTransform(Transform):
"""
The base polar transform. This handles projection *theta* and
*r* into Cartesian coordinate space *x* and *y*, but does not
perform the ultimate affine transformation into the correct
position.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, axis=None, use_rmin=True):
Transform.__init__(self)
self._axis = axis
self._use_rmin = use_rmin
def transform(self, tr):
xy = np.empty(tr.shape, np.float_)
if self._axis is not None:
if self._use_rmin:
rmin = self._axis.viewLim.ymin
else:
rmin = 0
theta_offset = self._axis.get_theta_offset()
theta_direction = self._axis.get_theta_direction()
else:
rmin = 0
theta_offset = 0
theta_direction = 1
t = tr[:, 0:1]
r = tr[:, 1:2]
x = xy[:, 0:1]
y = xy[:, 1:2]
t *= theta_direction
t += theta_offset
if rmin != 0:
r = r - rmin
mask = r < 0
x[:] = np.where(mask, np.nan, r * np.cos(t))
y[:] = np.where(mask, np.nan, r * np.sin(t))
else:
x[:] = r * np.cos(t)
y[:] = r * np.sin(t)
return xy
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
if len(vertices) == 2 and vertices[0, 0] == vertices[1, 0]:
return Path(self.transform(vertices), path.codes)
ipath = path.interpolated(path._interpolation_steps)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return PolarAxes.InvertedPolarTransform(self._axis, self._use_rmin)
inverted.__doc__ = Transform.inverted.__doc__
class PolarAffine(Affine2DBase):
"""
The affine part of the polar projection. Scales the output so
that maximum radius rests on the edge of the axes circle.
"""
def __init__(self, scale_transform, limits):
"""
*limits* is the view limit of the data. The only part of
its bounds that is used is ymax (for the radius maximum).
The theta range is always fixed to (0, 2pi).
"""
Affine2DBase.__init__(self)
self._scale_transform = scale_transform
self._limits = limits
self.set_children(scale_transform, limits)
self._mtx = None
def get_matrix(self):
if self._invalid:
limits_scaled = self._limits.transformed(self._scale_transform)
yscale = limits_scaled.ymax - limits_scaled.ymin
affine = Affine2D() \
.scale(0.5 / yscale) \
.translate(0.5, 0.5)
self._mtx = affine.get_matrix()
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class InvertedPolarTransform(Transform):
"""
The inverse of the polar transform, mapping Cartesian
coordinate space *x* and *y* back to *theta* and *r*.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, axis=None, use_rmin=True):
Transform.__init__(self)
self._axis = axis
self._use_rmin = use_rmin
def transform(self, xy):
if self._axis is not None:
if self._use_rmin:
rmin = self._axis.viewLim.ymin
else:
rmin = 0
theta_offset = self._axis.get_theta_offset()
theta_direction = self._axis.get_theta_direction()
else:
rmin = 0
theta_offset = 0
theta_direction = 1
x = xy[:, 0:1]
y = xy[:, 1:]
r = np.sqrt(x*x + y*y)
theta = np.arccos(x / r)
theta = np.where(y < 0, 2 * np.pi - theta, theta)
theta -= theta_offset
theta *= theta_direction
r += rmin
return np.concatenate((theta, r), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return PolarAxes.PolarTransform(self._axis, self._use_rmin)
inverted.__doc__ = Transform.inverted.__doc__
class ThetaFormatter(Formatter):
"""
Used to format the *theta* tick labels. Converts the native
unit of radians into degrees and adds a degree symbol.
"""
def __call__(self, x, pos=None):
# \u00b0 : degree symbol
if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
return r"$%0.0f^\circ$" % ((x / np.pi) * 180.0)
else:
# we use unicode, rather than mathtext with \circ, so
# that it will work correctly with any arbitrary font
# (assuming it has a degree sign), whereas $5\circ$
# will only work correctly with one of the supported
# math fonts (Computer Modern and STIX)
return u"%0.0f\u00b0" % ((x / np.pi) * 180.0)
class RadialLocator(Locator):
"""
Used to locate radius ticks.
Ensures that all ticks are strictly positive. For all other
tasks, it delegates to the base
:class:`~matplotlib.ticker.Locator` (which may be different
depending on the scale of the *r*-axis.
"""
def __init__(self, base):
self.base = base
def __call__(self):
ticks = self.base()
return [x for x in ticks if x > 0]
def autoscale(self):
return self.base.autoscale()
def pan(self, numsteps):
return self.base.pan(numsteps)
def zoom(self, direction):
return self.base.zoom(direction)
def refresh(self):
return self.base.refresh()
def view_limits(self, vmin, vmax):
vmin, vmax = self.base.view_limits(vmin, vmax)
return 0, vmax
# MASKED: __init__ function (lines 221-240)
__init__.__doc__ = Axes.__init__.__doc__
def cla(self):
Axes.cla(self)
self.title.set_y(1.05)
self.xaxis.set_major_formatter(self.ThetaFormatter())
self.xaxis.isDefault_majfmt = True
angles = np.arange(0.0, 360.0, 45.0)
self.set_thetagrids(angles)
self.yaxis.set_major_locator(self.RadialLocator(self.yaxis.get_major_locator()))
self.grid(rcParams['polaraxes.grid'])
self.xaxis.set_ticks_position('none')
self.yaxis.set_ticks_position('none')
self.yaxis.set_tick_params(label1On=True)
# Why do we need to turn on yaxis tick labels, but
# xaxis tick labels are already on?
self.set_theta_offset(0)
self.set_theta_direction(1)
def _init_axis(self):
"move this out of __init__ because non-separable axes don't use it"
self.xaxis = maxis.XAxis(self)
self.yaxis = maxis.YAxis(self)
# Calling polar_axes.xaxis.cla() or polar_axes.xaxis.cla()
# results in weird artifacts. Therefore we disable this for
# now.
# self.spines['polar'].register_axis(self.yaxis)
self._update_transScale()
def _set_lim_and_transforms(self):
self.transAxes = BboxTransformTo(self.bbox)
# Transforms the x and y axis separately by a scale factor
# It is assumed that this part will have non-linear components
self.transScale = TransformWrapper(IdentityTransform())
# A (possibly non-linear) projection on the (already scaled)
# data. This one is aware of rmin
self.transProjection = self.PolarTransform(self)
# This one is not aware of rmin
self.transPureProjection = self.PolarTransform(self, use_rmin=False)
# An affine transformation on the data, generally to limit the
# range of the axes
self.transProjectionAffine = self.PolarAffine(self.transScale, self.viewLim)
# The complete data transformation stack -- from data all the
# way to display coordinates
self.transData = self.transScale + self.transProjection + \
(self.transProjectionAffine + self.transAxes)
# This is the transform for theta-axis ticks. It is
# equivalent to transData, except it always puts r == 1.0 at
# the edge of the axis circle.
self._xaxis_transform = (
self.transPureProjection +
self.PolarAffine(IdentityTransform(), Bbox.unit()) +
self.transAxes)
# The theta labels are moved from radius == 0.0 to radius == 1.1
self._theta_label1_position = Affine2D().translate(0.0, 1.1)
self._xaxis_text1_transform = (
self._theta_label1_position +
self._xaxis_transform)
self._theta_label2_position = Affine2D().translate(0.0, 1.0 / 1.1)
self._xaxis_text2_transform = (
self._theta_label2_position +
self._xaxis_transform)
# This is the transform for r-axis ticks. It scales the theta
# axis so the gridlines from 0.0 to 1.0, now go from 0.0 to
# 2pi.
self._yaxis_transform = (
Affine2D().scale(np.pi * 2.0, 1.0) +
self.transData)
# The r-axis labels are put at an angle and padded in the r-direction
self._r_label_position = ScaledTranslation(
22.5, 0.0, Affine2D())
self._yaxis_text_transform = (
self._r_label_position +
Affine2D().scale(1.0 / 360.0, 1.0) +
self._yaxis_transform
)
def get_xaxis_transform(self,which='grid'):
assert which in ['tick1','tick2','grid']
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad):
return self._xaxis_text1_transform, 'center', 'center'
def get_xaxis_text2_transform(self, pad):
return self._xaxis_text2_transform, 'center', 'center'
def get_yaxis_transform(self,which='grid'):
assert which in ['tick1','tick2','grid']
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad):
angle = self._r_label_position.to_values()[4]
if angle < 90.:
return self._yaxis_text_transform, 'bottom', 'left'
elif angle < 180.:
return self._yaxis_text_transform, 'bottom', 'right'
elif angle < 270.:
return self._yaxis_text_transform, 'top', 'right'
else:
return self._yaxis_text_transform, 'top', 'left'
def get_yaxis_text2_transform(self, pad):
angle = self._r_label_position.to_values()[4]
if angle < 90.:
return self._yaxis_text_transform, 'top', 'right'
elif angle < 180.:
return self._yaxis_text_transform, 'top', 'left'
elif angle < 270.:
return self._yaxis_text_transform, 'bottom', 'left'
else:
return self._yaxis_text_transform, 'bottom', 'right'
def _gen_axes_patch(self):
return Circle((0.5, 0.5), 0.5)
def _gen_axes_spines(self):
return {'polar':mspines.Spine.circular_spine(self,
(0.5, 0.5), 0.5)}
def set_rmax(self, rmax):
self.viewLim.y1 = rmax
def get_rmax(self):
return self.viewLim.ymax
def set_rmin(self, rmin):
self.viewLim.y0 = rmin
def get_rmin(self):
return self.viewLim.ymin
def set_theta_offset(self, offset):
"""
Set the offset for the location of 0 in radians.
"""
self._theta_offset = offset
def get_theta_offset(self):
"""
Get the offset for the location of 0 in radians.
"""
return self._theta_offset
def set_theta_zero_location(self, loc):
"""
Sets the location of theta's zero. (Calls set_theta_offset
with the correct value in radians under the hood.)
May be one of "N", "NW", "W", "SW", "S", "SE", "E", or "NE".
"""
mapping = {
'N': np.pi * 0.5,
'NW': np.pi * 0.75,
'W': np.pi,
'SW': np.pi * 1.25,
'S': np.pi * 1.5,
'SE': np.pi * 1.75,
'E': 0,
'NE': np.pi * 0.25 }
return self.set_theta_offset(mapping[loc])
def set_theta_direction(self, direction):
"""
Set the direction in which theta increases.
clockwise, -1:
Theta increases in the clockwise direction
counterclockwise, anticlockwise, 1:
Theta increases in the counterclockwise direction
"""
if direction in ('clockwise',):
self._direction = -1
elif direction in ('counterclockwise', 'anticlockwise'):
self._direction = 1
elif direction in (1, -1):
self._direction = direction
else:
raise ValueError("direction must be 1, -1, clockwise or counterclockwise")
def get_theta_direction(self):
"""
Get the direction in which theta increases.
-1:
Theta increases in the clockwise direction
1:
Theta increases in the counterclockwise direction
"""
return self._direction
def set_rlim(self, *args, **kwargs):
if 'rmin' in kwargs:
kwargs['ymin'] = kwargs.pop('rmin')
if 'rmax' in kwargs:
kwargs['ymax'] = kwargs.pop('rmax')
return self.set_ylim(*args, **kwargs)
def set_yscale(self, *args, **kwargs):
Axes.set_yscale(self, *args, **kwargs)
self.yaxis.set_major_locator(
self.RadialLocator(self.yaxis.get_major_locator()))
set_rscale = Axes.set_yscale
set_rticks = Axes.set_yticks
@docstring.dedent_interpd
def set_thetagrids(self, angles, labels=None, frac=None, fmt=None,
**kwargs):
"""
Set the angles at which to place the theta grids (these
gridlines are equal along the theta dimension). *angles* is in
degrees.
*labels*, if not None, is a ``len(angles)`` list of strings of
the labels to use at each angle.
If *labels* is None, the labels will be ``fmt %% angle``
*frac* is the fraction of the polar axes radius at which to
place the label (1 is the edge). Eg. 1.05 is outside the axes
and 0.95 is inside the axes.
Return value is a list of tuples (*line*, *label*), where
*line* is :class:`~matplotlib.lines.Line2D` instances and the
*label* is :class:`~matplotlib.text.Text` instances.
kwargs are optional text properties for the labels:
%(Text)s
ACCEPTS: sequence of floats
"""
angles = np.asarray(angles, np.float_)
self.set_xticks(angles * (np.pi / 180.0))
if labels is not None:
self.set_xticklabels(labels)
elif fmt is not None:
self.xaxis.set_major_formatter(FormatStrFormatter(fmt))
if frac is not None:
self._theta_label1_position.clear().translate(0.0, frac)
self._theta_label2_position.clear().translate(0.0, 1.0 / frac)
for t in self.xaxis.get_ticklabels():
t.update(kwargs)
return self.xaxis.get_ticklines(), self.xaxis.get_ticklabels()
@docstring.dedent_interpd
def set_rgrids(self, radii, labels=None, angle=None, fmt=None,
**kwargs):
"""
Set the radial locations and labels of the *r* grids.
The labels will appear at radial distances *radii* at the
given *angle* in degrees.
*labels*, if not None, is a ``len(radii)`` list of strings of the
labels to use at each radius.
If *labels* is None, the built-in formatter will be used.
Return value is a list of tuples (*line*, *label*), where
*line* is :class:`~matplotlib.lines.Line2D` instances and the
*label* is :class:`~matplotlib.text.Text` instances.
kwargs are optional text properties for the labels:
%(Text)s
ACCEPTS: sequence of floats
"""
radii = np.asarray(radii)
rmin = radii.min()
if rmin <= 0:
raise ValueError('radial grids must be strictly positive')
self.set_yticks(radii)
if labels is not None:
self.set_yticklabels(labels)
elif fmt is not None:
self.yaxis.set_major_formatter(FormatStrFormatter(fmt))
if angle is None:
angle = self._r_label_position.to_values()[4]
self._r_label_position._t = (angle, 0.0)
self._r_label_position.invalidate()
for t in self.yaxis.get_ticklabels():
t.update(kwargs)
return self.yaxis.get_gridlines(), self.yaxis.get_ticklabels()
def set_xscale(self, scale, *args, **kwargs):
if scale != 'linear':
raise NotImplementedError("You can not set the xscale on a polar plot.")
def set_xlim(self, *args, **kargs):
# The xlim is fixed, no matter what you do
self.viewLim.intervalx = (0.0, np.pi * 2.0)
def format_coord(self, theta, r):
"""
Return a format string formatting the coordinate using Unicode
characters.
"""
theta /= math.pi
# \u03b8: lower-case theta
# \u03c0: lower-case pi
# \u00b0: degree symbol
return u'\u03b8=%0.3f\u03c0 (%0.3f\u00b0), r=%0.3f' % (theta, theta * 180.0, r)
def get_data_ratio(self):
'''
Return the aspect ratio of the data itself. For a polar plot,
this should always be 1.0
'''
return 1.0
### Interactive panning
def can_zoom(self):
"""
Return *True* if this axes supports the zoom box button functionality.
Polar axes do not support zoom boxes.
"""
return False
def can_pan(self) :
"""
Return *True* if this axes supports the pan/zoom button functionality.
For polar axes, this is slightly misleading. Both panning and
zooming are performed by the same button. Panning is performed
in azimuth while zooming is done along the radial.
"""
return True
def start_pan(self, x, y, button):
angle = np.deg2rad(self._r_label_position.to_values()[4])
mode = ''
if button == 1:
epsilon = np.pi / 45.0
t, r = self.transData.inverted().transform_point((x, y))
if t >= angle - epsilon and t <= angle + epsilon:
mode = 'drag_r_labels'
elif button == 3:
mode = 'zoom'
self._pan_start = cbook.Bunch(
rmax = self.get_rmax(),
trans = self.transData.frozen(),
trans_inverse = self.transData.inverted().frozen(),
r_label_angle = self._r_label_position.to_values()[4],
x = x,
y = y,
mode = mode
)
def end_pan(self):
del self._pan_start
def drag_pan(self, button, key, x, y):
p = self._pan_start
if p.mode == 'drag_r_labels':
startt, startr = p.trans_inverse.transform_point((p.x, p.y))
t, r = p.trans_inverse.transform_point((x, y))
# Deal with theta
dt0 = t - startt
dt1 = startt - t
if abs(dt1) < abs(dt0):
dt = abs(dt1) * np.sign(dt0) * -1.0
else:
dt = dt0 * -1.0
dt = (dt / np.pi) * 180.0
self._r_label_position._t = (p.r_label_angle - dt, 0.0)
self._r_label_position.invalidate()
trans, vert1, horiz1 = self.get_yaxis_text1_transform(0.0)
trans, vert2, horiz2 = self.get_yaxis_text2_transform(0.0)
for t in self.yaxis.majorTicks + self.yaxis.minorTicks:
t.label1.set_va(vert1)
t.label1.set_ha(horiz1)
t.label2.set_va(vert2)
t.label2.set_ha(horiz2)
elif p.mode == 'zoom':
startt, startr = p.trans_inverse.transform_point((p.x, p.y))
t, r = p.trans_inverse.transform_point((x, y))
dr = r - startr
# Deal with r
scale = r / startr
self.set_rmax(p.rmax / scale)
# These are a couple of aborted attempts to project a polar plot using
# cubic bezier curves.
# def transform_path(self, path):
# twopi = 2.0 * np.pi
# halfpi = 0.5 * np.pi
# vertices = path.vertices
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# maxtd = td.max()
# interpolate = np.ceil(maxtd / halfpi)
# if interpolate > 1.0:
# vertices = self.interpolate(vertices, interpolate)
# vertices = self.transform(vertices)
# result = np.zeros((len(vertices) * 3 - 2, 2), np.float_)
# codes = mpath.Path.CURVE4 * np.ones((len(vertices) * 3 - 2, ), mpath.Path.code_type)
# result[0] = vertices[0]
# codes[0] = mpath.Path.MOVETO
# kappa = 4.0 * ((np.sqrt(2.0) - 1.0) / 3.0)
# kappa = 0.5
# p0 = vertices[0:-1]
# p1 = vertices[1: ]
# x0 = p0[:, 0:1]
# y0 = p0[:, 1: ]
# b0 = ((y0 - x0) - y0) / ((x0 + y0) - x0)
# a0 = y0 - b0*x0
# x1 = p1[:, 0:1]
# y1 = p1[:, 1: ]
# b1 = ((y1 - x1) - y1) / ((x1 + y1) - x1)
# a1 = y1 - b1*x1
# x = -(a0-a1) / (b0-b1)
# y = a0 + b0*x
# xk = (x - x0) * kappa + x0
# yk = (y - y0) * kappa + y0
# result[1::3, 0:1] = xk
# result[1::3, 1: ] = yk
# xk = (x - x1) * kappa + x1
# yk = (y - y1) * kappa + y1
# result[2::3, 0:1] = xk
# result[2::3, 1: ] = yk
# result[3::3] = p1
# print vertices[-2:]
# print result[-2:]
# return mpath.Path(result, codes)
# twopi = 2.0 * np.pi
# halfpi = 0.5 * np.pi
# vertices = path.vertices
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# maxtd = td.max()
# interpolate = np.ceil(maxtd / halfpi)
# print "interpolate", interpolate
# if interpolate > 1.0:
# vertices = self.interpolate(vertices, interpolate)
# result = np.zeros((len(vertices) * 3 - 2, 2), np.float_)
# codes = mpath.Path.CURVE4 * np.ones((len(vertices) * 3 - 2, ), mpath.Path.code_type)
# result[0] = vertices[0]
# codes[0] = mpath.Path.MOVETO
# kappa = 4.0 * ((np.sqrt(2.0) - 1.0) / 3.0)
# tkappa = np.arctan(kappa)
# hyp_kappa = np.sqrt(kappa*kappa + 1.0)
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# r0 = vertices[0:-1, 1]
# r1 = vertices[1: , 1]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# td_scaled = td / (np.pi * 0.5)
# rd = r1 - r0
# r0kappa = r0 * kappa * td_scaled
# r1kappa = r1 * kappa * td_scaled
# ravg_kappa = ((r1 + r0) / 2.0) * kappa * td_scaled
# result[1::3, 0] = t0 + (tkappa * td_scaled)
# result[1::3, 1] = r0*hyp_kappa
# # result[1::3, 1] = r0 / np.cos(tkappa * td_scaled) # np.sqrt(r0*r0 + ravg_kappa*ravg_kappa)
# result[2::3, 0] = t1 - (tkappa * td_scaled)
# result[2::3, 1] = r1*hyp_kappa
# # result[2::3, 1] = r1 / np.cos(tkappa * td_scaled) # np.sqrt(r1*r1 + ravg_kappa*ravg_kappa)
# result[3::3, 0] = t1
# result[3::3, 1] = r1
# print vertices[:6], result[:6], t0[:6], t1[:6], td[:6], td_scaled[:6], tkappa
# result = self.transform(result)
# return mpath.Path(result, codes)
# transform_path_non_affine = transform_path
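# A minimal numeric sketch (not part of this module) of the mapping that
# PolarTransform implements for theta_offset=0 and theta_direction=+1:
#     x = r * cos(theta), y = r * sin(theta)
#
# import numpy as np
# tr = np.array([[0.0, 1.0], [np.pi / 2, 2.0]])  # columns: theta, r
# xy = np.column_stack((tr[:, 1] * np.cos(tr[:, 0]),
#                       tr[:, 1] * np.sin(tr[:, 0])))
# # -> approximately [[1., 0.], [0., 2.]]: east at theta=0, counter-clockwise.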
|
def __init__(self, *args, **kwargs):
"""
Create a new Polar Axes for a polar plot.
The following optional kwargs are supported:
- *resolution*: The number of points of interpolation between
each pair of data points. Set to 1 to disable
interpolation.
"""
self.resolution = kwargs.pop('resolution', None)
if self.resolution not in (None, 1):
warnings.warn(
"""The resolution kwarg to Polar plots is now ignored.
If you need to interpolate data points, consider running
cbook.simple_linear_interpolation on the data before passing to matplotlib.""")
Axes.__init__(self, *args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.cla()
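# A minimal usage sketch: PolarAxes is normally reached through the projection
# registry (its registered name is 'polar') rather than instantiated directly.
#
# import matplotlib.pyplot as plt
# ax = plt.subplot(111, projection='polar')
# ax.plot([0.0, 3.14], [1.0, 2.0])
# ax.set_theta_zero_location('N')  # put theta=0 at the top
# plt.show()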
| 221
| 240
|
import math
import warnings
import numpy as np
import matplotlib
rcParams = matplotlib.rcParams
from matplotlib.axes import Axes
import matplotlib.axis as maxis
from matplotlib import cbook
from matplotlib import docstring
from matplotlib.patches import Circle
from matplotlib.path import Path
from matplotlib.ticker import Formatter, Locator, FormatStrFormatter
from matplotlib.transforms import Affine2D, Affine2DBase, Bbox, \
BboxTransformTo, IdentityTransform, Transform, TransformWrapper, \
ScaledTranslation, blended_transform_factory, BboxTransformToMaxOnly
import matplotlib.spines as mspines
class PolarAxes(Axes):
"""
A polar graph projection, where the input dimensions are *theta*, *r*.
Theta starts pointing east and goes anti-clockwise.
"""
name = 'polar'
class PolarTransform(Transform):
"""
The base polar transform. This handles projection *theta* and
*r* into Cartesian coordinate space *x* and *y*, but does not
perform the ultimate affine transformation into the correct
position.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, axis=None, use_rmin=True):
Transform.__init__(self)
self._axis = axis
self._use_rmin = use_rmin
def transform(self, tr):
xy = np.empty(tr.shape, np.float_)
if self._axis is not None:
if self._use_rmin:
rmin = self._axis.viewLim.ymin
else:
rmin = 0
theta_offset = self._axis.get_theta_offset()
theta_direction = self._axis.get_theta_direction()
else:
rmin = 0
theta_offset = 0
theta_direction = 1
t = tr[:, 0:1]
r = tr[:, 1:2]
x = xy[:, 0:1]
y = xy[:, 1:2]
t *= theta_direction
t += theta_offset
if rmin != 0:
r = r - rmin
mask = r < 0
x[:] = np.where(mask, np.nan, r * np.cos(t))
y[:] = np.where(mask, np.nan, r * np.sin(t))
else:
x[:] = r * np.cos(t)
y[:] = r * np.sin(t)
return xy
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
if len(vertices) == 2 and vertices[0, 0] == vertices[1, 0]:
return Path(self.transform(vertices), path.codes)
ipath = path.interpolated(path._interpolation_steps)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return PolarAxes.InvertedPolarTransform(self._axis, self._use_rmin)
inverted.__doc__ = Transform.inverted.__doc__
class PolarAffine(Affine2DBase):
"""
The affine part of the polar projection. Scales the output so
that maximum radius rests on the edge of the axes circle.
"""
def __init__(self, scale_transform, limits):
"""
*limits* is the view limit of the data. The only part of
its bounds that is used is ymax (for the radius maximum).
The theta range is always fixed to (0, 2pi).
"""
Affine2DBase.__init__(self)
self._scale_transform = scale_transform
self._limits = limits
self.set_children(scale_transform, limits)
self._mtx = None
def get_matrix(self):
if self._invalid:
limits_scaled = self._limits.transformed(self._scale_transform)
yscale = limits_scaled.ymax - limits_scaled.ymin
affine = Affine2D() \
.scale(0.5 / yscale) \
.translate(0.5, 0.5)
self._mtx = affine.get_matrix()
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class InvertedPolarTransform(Transform):
"""
The inverse of the polar transform, mapping Cartesian
coordinate space *x* and *y* back to *theta* and *r*.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, axis=None, use_rmin=True):
Transform.__init__(self)
self._axis = axis
self._use_rmin = use_rmin
def transform(self, xy):
if self._axis is not None:
if self._use_rmin:
rmin = self._axis.viewLim.ymin
else:
rmin = 0
theta_offset = self._axis.get_theta_offset()
theta_direction = self._axis.get_theta_direction()
else:
rmin = 0
theta_offset = 0
theta_direction = 1
x = xy[:, 0:1]
y = xy[:, 1:]
r = np.sqrt(x*x + y*y)
theta = np.arccos(x / r)
theta = np.where(y < 0, 2 * np.pi - theta, theta)
theta -= theta_offset
theta *= theta_direction
r += rmin
return np.concatenate((theta, r), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return PolarAxes.PolarTransform(self._axis, self._use_rmin)
inverted.__doc__ = Transform.inverted.__doc__
class ThetaFormatter(Formatter):
"""
Used to format the *theta* tick labels. Converts the native
unit of radians into degrees and adds a degree symbol.
"""
def __call__(self, x, pos=None):
# \u00b0 : degree symbol
if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
return r"$%0.0f^\circ$" % ((x / np.pi) * 180.0)
else:
# we use unicode, rather than mathtext with \circ, so
# that it will work correctly with any arbitrary font
# (assuming it has a degree sign), whereas $5\circ$
# will only work correctly with one of the supported
# math fonts (Computer Modern and STIX)
return u"%0.0f\u00b0" % ((x / np.pi) * 180.0)
class RadialLocator(Locator):
"""
Used to locate radius ticks.
Ensures that all ticks are strictly positive. For all other
tasks, it delegates to the base
:class:`~matplotlib.ticker.Locator` (which may be different
depending on the scale of the *r*-axis.
"""
def __init__(self, base):
self.base = base
def __call__(self):
ticks = self.base()
return [x for x in ticks if x > 0]
def autoscale(self):
return self.base.autoscale()
def pan(self, numsteps):
return self.base.pan(numsteps)
def zoom(self, direction):
return self.base.zoom(direction)
def refresh(self):
return self.base.refresh()
def view_limits(self, vmin, vmax):
vmin, vmax = self.base.view_limits(vmin, vmax)
return 0, vmax
def __init__(self, *args, **kwargs):
"""
Create a new Polar Axes for a polar plot.
The following optional kwargs are supported:
- *resolution*: The number of points of interpolation between
each pair of data points. Set to 1 to disable
interpolation.
"""
self.resolution = kwargs.pop('resolution', None)
if self.resolution not in (None, 1):
warnings.warn(
"""The resolution kwarg to Polar plots is now ignored.
If you need to interpolate data points, consider running
cbook.simple_linear_interpolation on the data before passing to matplotlib.""")
Axes.__init__(self, *args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.cla()
__init__.__doc__ = Axes.__init__.__doc__
def cla(self):
Axes.cla(self)
self.title.set_y(1.05)
self.xaxis.set_major_formatter(self.ThetaFormatter())
self.xaxis.isDefault_majfmt = True
angles = np.arange(0.0, 360.0, 45.0)
self.set_thetagrids(angles)
self.yaxis.set_major_locator(self.RadialLocator(self.yaxis.get_major_locator()))
self.grid(rcParams['polaraxes.grid'])
self.xaxis.set_ticks_position('none')
self.yaxis.set_ticks_position('none')
self.yaxis.set_tick_params(label1On=True)
# Why do we need to turn on yaxis tick labels, but
# xaxis tick labels are already on?
self.set_theta_offset(0)
self.set_theta_direction(1)
def _init_axis(self):
"move this out of __init__ because non-separable axes don't use it"
self.xaxis = maxis.XAxis(self)
self.yaxis = maxis.YAxis(self)
# Calling polar_axes.xaxis.cla() or polar_axes.xaxis.cla()
# results in weird artifacts. Therefore we disable this for
# now.
# self.spines['polar'].register_axis(self.yaxis)
self._update_transScale()
def _set_lim_and_transforms(self):
self.transAxes = BboxTransformTo(self.bbox)
# Transforms the x and y axis separately by a scale factor
# It is assumed that this part will have non-linear components
self.transScale = TransformWrapper(IdentityTransform())
# A (possibly non-linear) projection on the (already scaled)
# data. This one is aware of rmin
self.transProjection = self.PolarTransform(self)
# This one is not aware of rmin
self.transPureProjection = self.PolarTransform(self, use_rmin=False)
# An affine transformation on the data, generally to limit the
# range of the axes
self.transProjectionAffine = self.PolarAffine(self.transScale, self.viewLim)
# The complete data transformation stack -- from data all the
# way to display coordinates
self.transData = self.transScale + self.transProjection + \
(self.transProjectionAffine + self.transAxes)
# This is the transform for theta-axis ticks. It is
# equivalent to transData, except it always puts r == 1.0 at
# the edge of the axis circle.
self._xaxis_transform = (
self.transPureProjection +
self.PolarAffine(IdentityTransform(), Bbox.unit()) +
self.transAxes)
# The theta labels are moved from radius == 0.0 to radius == 1.1
self._theta_label1_position = Affine2D().translate(0.0, 1.1)
self._xaxis_text1_transform = (
self._theta_label1_position +
self._xaxis_transform)
self._theta_label2_position = Affine2D().translate(0.0, 1.0 / 1.1)
self._xaxis_text2_transform = (
self._theta_label2_position +
self._xaxis_transform)
# This is the transform for r-axis ticks. It scales the theta
# axis so the gridlines from 0.0 to 1.0, now go from 0.0 to
# 2pi.
self._yaxis_transform = (
Affine2D().scale(np.pi * 2.0, 1.0) +
self.transData)
# The r-axis labels are put at an angle and padded in the r-direction
self._r_label_position = ScaledTranslation(
22.5, 0.0, Affine2D())
self._yaxis_text_transform = (
self._r_label_position +
Affine2D().scale(1.0 / 360.0, 1.0) +
self._yaxis_transform
)
def get_xaxis_transform(self,which='grid'):
assert which in ['tick1','tick2','grid']
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad):
return self._xaxis_text1_transform, 'center', 'center'
def get_xaxis_text2_transform(self, pad):
return self._xaxis_text2_transform, 'center', 'center'
def get_yaxis_transform(self,which='grid'):
assert which in ['tick1','tick2','grid']
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad):
angle = self._r_label_position.to_values()[4]
if angle < 90.:
return self._yaxis_text_transform, 'bottom', 'left'
elif angle < 180.:
return self._yaxis_text_transform, 'bottom', 'right'
elif angle < 270.:
return self._yaxis_text_transform, 'top', 'right'
else:
return self._yaxis_text_transform, 'top', 'left'
def get_yaxis_text2_transform(self, pad):
angle = self._r_label_position.to_values()[4]
if angle < 90.:
return self._yaxis_text_transform, 'top', 'right'
elif angle < 180.:
return self._yaxis_text_transform, 'top', 'left'
elif angle < 270.:
return self._yaxis_text_transform, 'bottom', 'left'
else:
return self._yaxis_text_transform, 'bottom', 'right'
def _gen_axes_patch(self):
return Circle((0.5, 0.5), 0.5)
def _gen_axes_spines(self):
return {'polar':mspines.Spine.circular_spine(self,
(0.5, 0.5), 0.5)}
def set_rmax(self, rmax):
self.viewLim.y1 = rmax
def get_rmax(self):
return self.viewLim.ymax
def set_rmin(self, rmin):
self.viewLim.y0 = rmin
def get_rmin(self):
return self.viewLim.ymin
def set_theta_offset(self, offset):
"""
Set the offset for the location of 0 in radians.
"""
self._theta_offset = offset
def get_theta_offset(self):
"""
Get the offset for the location of 0 in radians.
"""
return self._theta_offset
def set_theta_zero_location(self, loc):
"""
Sets the location of theta's zero. (Calls set_theta_offset
with the correct value in radians under the hood.)
May be one of "N", "NW", "W", "SW", "S", "SE", "E", or "NE".
"""
mapping = {
'N': np.pi * 0.5,
'NW': np.pi * 0.75,
'W': np.pi,
'SW': np.pi * 1.25,
'S': np.pi * 1.5,
'SE': np.pi * 1.75,
'E': 0,
'NE': np.pi * 0.25 }
return self.set_theta_offset(mapping[loc])
def set_theta_direction(self, direction):
"""
Set the direction in which theta increases.
clockwise, -1:
Theta increases in the clockwise direction
counterclockwise, anticlockwise, 1:
Theta increases in the counterclockwise direction
"""
if direction in ('clockwise',):
self._direction = -1
elif direction in ('counterclockwise', 'anticlockwise'):
self._direction = 1
elif direction in (1, -1):
self._direction = direction
else:
raise ValueError("direction must be 1, -1, clockwise or counterclockwise")
def get_theta_direction(self):
"""
Get the direction in which theta increases.
-1:
Theta increases in the clockwise direction
1:
Theta increases in the counterclockwise direction
"""
return self._direction
def set_rlim(self, *args, **kwargs):
if 'rmin' in kwargs:
kwargs['ymin'] = kwargs.pop('rmin')
if 'rmax' in kwargs:
kwargs['ymax'] = kwargs.pop('rmax')
return self.set_ylim(*args, **kwargs)
def set_yscale(self, *args, **kwargs):
Axes.set_yscale(self, *args, **kwargs)
self.yaxis.set_major_locator(
self.RadialLocator(self.yaxis.get_major_locator()))
set_rscale = Axes.set_yscale
set_rticks = Axes.set_yticks
@docstring.dedent_interpd
def set_thetagrids(self, angles, labels=None, frac=None, fmt=None,
**kwargs):
"""
Set the angles at which to place the theta grids (these
gridlines are equal along the theta dimension). *angles* is in
degrees.
*labels*, if not None, is a ``len(angles)`` list of strings of
the labels to use at each angle.
If *labels* is None, the labels will be ``fmt %% angle``
        *frac* is the fraction of the polar axes radius at which to
        place the label (1 is the edge). E.g., 1.05 is outside the axes
        and 0.95 is inside the axes.
        Return value is a tuple (*lines*, *labels*), where *lines* is a
        list of :class:`~matplotlib.lines.Line2D` instances and *labels*
        is a list of :class:`~matplotlib.text.Text` instances.
kwargs are optional text properties for the labels:
%(Text)s
ACCEPTS: sequence of floats
"""
angles = np.asarray(angles, np.float_)
self.set_xticks(angles * (np.pi / 180.0))
if labels is not None:
self.set_xticklabels(labels)
elif fmt is not None:
self.xaxis.set_major_formatter(FormatStrFormatter(fmt))
if frac is not None:
self._theta_label1_position.clear().translate(0.0, frac)
self._theta_label2_position.clear().translate(0.0, 1.0 / frac)
for t in self.xaxis.get_ticklabels():
t.update(kwargs)
return self.xaxis.get_ticklines(), self.xaxis.get_ticklabels()
@docstring.dedent_interpd
def set_rgrids(self, radii, labels=None, angle=None, fmt=None,
**kwargs):
"""
Set the radial locations and labels of the *r* grids.
The labels will appear at radial distances *radii* at the
given *angle* in degrees.
*labels*, if not None, is a ``len(radii)`` list of strings of the
labels to use at each radius.
If *labels* is None, the built-in formatter will be used.
        Return value is a tuple (*lines*, *labels*), where *lines* is a
        list of :class:`~matplotlib.lines.Line2D` instances and *labels*
        is a list of :class:`~matplotlib.text.Text` instances.
kwargs are optional text properties for the labels:
%(Text)s
ACCEPTS: sequence of floats
"""
radii = np.asarray(radii)
rmin = radii.min()
if rmin <= 0:
raise ValueError('radial grids must be strictly positive')
self.set_yticks(radii)
if labels is not None:
self.set_yticklabels(labels)
elif fmt is not None:
self.yaxis.set_major_formatter(FormatStrFormatter(fmt))
if angle is None:
angle = self._r_label_position.to_values()[4]
self._r_label_position._t = (angle, 0.0)
self._r_label_position.invalidate()
for t in self.yaxis.get_ticklabels():
t.update(kwargs)
return self.yaxis.get_gridlines(), self.yaxis.get_ticklabels()
def set_xscale(self, scale, *args, **kwargs):
if scale != 'linear':
            raise NotImplementedError("You cannot set the xscale on a polar plot.")
    def set_xlim(self, *args, **kwargs):
# The xlim is fixed, no matter what you do
self.viewLim.intervalx = (0.0, np.pi * 2.0)
def format_coord(self, theta, r):
"""
Return a format string formatting the coordinate using Unicode
characters.
"""
theta /= math.pi
# \u03b8: lower-case theta
# \u03c0: lower-case pi
# \u00b0: degree symbol
return u'\u03b8=%0.3f\u03c0 (%0.3f\u00b0), r=%0.3f' % (theta, theta * 180.0, r)
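        # Example (editor's illustration, not part of the original source):
        # for theta = pi/2 and r = 1.0 this returns
        #     u'\u03b8=0.500\u03c0 (90.000\u00b0), r=1.000'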
def get_data_ratio(self):
'''
Return the aspect ratio of the data itself. For a polar plot,
this should always be 1.0
'''
return 1.0
### Interactive panning
def can_zoom(self):
"""
Return *True* if this axes supports the zoom box button functionality.
Polar axes do not support zoom boxes.
"""
return False
    def can_pan(self):
"""
Return *True* if this axes supports the pan/zoom button functionality.
For polar axes, this is slightly misleading. Both panning and
zooming are performed by the same button. Panning is performed
in azimuth while zooming is done along the radial.
"""
return True
def start_pan(self, x, y, button):
angle = np.deg2rad(self._r_label_position.to_values()[4])
mode = ''
if button == 1:
epsilon = np.pi / 45.0
t, r = self.transData.inverted().transform_point((x, y))
if t >= angle - epsilon and t <= angle + epsilon:
mode = 'drag_r_labels'
elif button == 3:
mode = 'zoom'
self._pan_start = cbook.Bunch(
rmax = self.get_rmax(),
trans = self.transData.frozen(),
trans_inverse = self.transData.inverted().frozen(),
r_label_angle = self._r_label_position.to_values()[4],
x = x,
y = y,
mode = mode
)
def end_pan(self):
del self._pan_start
def drag_pan(self, button, key, x, y):
p = self._pan_start
if p.mode == 'drag_r_labels':
startt, startr = p.trans_inverse.transform_point((p.x, p.y))
t, r = p.trans_inverse.transform_point((x, y))
# Deal with theta
dt0 = t - startt
dt1 = startt - t
if abs(dt1) < abs(dt0):
                dt = abs(dt1) * np.sign(dt0) * -1.0
else:
dt = dt0 * -1.0
dt = (dt / np.pi) * 180.0
self._r_label_position._t = (p.r_label_angle - dt, 0.0)
self._r_label_position.invalidate()
trans, vert1, horiz1 = self.get_yaxis_text1_transform(0.0)
trans, vert2, horiz2 = self.get_yaxis_text2_transform(0.0)
for t in self.yaxis.majorTicks + self.yaxis.minorTicks:
t.label1.set_va(vert1)
t.label1.set_ha(horiz1)
t.label2.set_va(vert2)
t.label2.set_ha(horiz2)
elif p.mode == 'zoom':
startt, startr = p.trans_inverse.transform_point((p.x, p.y))
t, r = p.trans_inverse.transform_point((x, y))
dr = r - startr
# Deal with r
scale = r / startr
self.set_rmax(p.rmax / scale)
# These are a couple of aborted attempts to project a polar plot using
# cubic bezier curves.
# def transform_path(self, path):
# twopi = 2.0 * np.pi
# halfpi = 0.5 * np.pi
# vertices = path.vertices
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# maxtd = td.max()
# interpolate = np.ceil(maxtd / halfpi)
# if interpolate > 1.0:
# vertices = self.interpolate(vertices, interpolate)
# vertices = self.transform(vertices)
# result = np.zeros((len(vertices) * 3 - 2, 2), np.float_)
# codes = mpath.Path.CURVE4 * np.ones((len(vertices) * 3 - 2, ), mpath.Path.code_type)
# result[0] = vertices[0]
# codes[0] = mpath.Path.MOVETO
# kappa = 4.0 * ((np.sqrt(2.0) - 1.0) / 3.0)
# kappa = 0.5
# p0 = vertices[0:-1]
# p1 = vertices[1: ]
# x0 = p0[:, 0:1]
# y0 = p0[:, 1: ]
# b0 = ((y0 - x0) - y0) / ((x0 + y0) - x0)
# a0 = y0 - b0*x0
# x1 = p1[:, 0:1]
# y1 = p1[:, 1: ]
# b1 = ((y1 - x1) - y1) / ((x1 + y1) - x1)
# a1 = y1 - b1*x1
# x = -(a0-a1) / (b0-b1)
# y = a0 + b0*x
# xk = (x - x0) * kappa + x0
# yk = (y - y0) * kappa + y0
# result[1::3, 0:1] = xk
# result[1::3, 1: ] = yk
# xk = (x - x1) * kappa + x1
# yk = (y - y1) * kappa + y1
# result[2::3, 0:1] = xk
# result[2::3, 1: ] = yk
# result[3::3] = p1
# print vertices[-2:]
# print result[-2:]
# return mpath.Path(result, codes)
# twopi = 2.0 * np.pi
# halfpi = 0.5 * np.pi
# vertices = path.vertices
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# maxtd = td.max()
# interpolate = np.ceil(maxtd / halfpi)
# print "interpolate", interpolate
# if interpolate > 1.0:
# vertices = self.interpolate(vertices, interpolate)
# result = np.zeros((len(vertices) * 3 - 2, 2), np.float_)
# codes = mpath.Path.CURVE4 * np.ones((len(vertices) * 3 - 2, ), mpath.Path.code_type)
# result[0] = vertices[0]
# codes[0] = mpath.Path.MOVETO
# kappa = 4.0 * ((np.sqrt(2.0) - 1.0) / 3.0)
# tkappa = np.arctan(kappa)
# hyp_kappa = np.sqrt(kappa*kappa + 1.0)
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# r0 = vertices[0:-1, 1]
# r1 = vertices[1: , 1]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# td_scaled = td / (np.pi * 0.5)
# rd = r1 - r0
# r0kappa = r0 * kappa * td_scaled
# r1kappa = r1 * kappa * td_scaled
# ravg_kappa = ((r1 + r0) / 2.0) * kappa * td_scaled
# result[1::3, 0] = t0 + (tkappa * td_scaled)
# result[1::3, 1] = r0*hyp_kappa
# # result[1::3, 1] = r0 / np.cos(tkappa * td_scaled) # np.sqrt(r0*r0 + ravg_kappa*ravg_kappa)
# result[2::3, 0] = t1 - (tkappa * td_scaled)
# result[2::3, 1] = r1*hyp_kappa
# # result[2::3, 1] = r1 / np.cos(tkappa * td_scaled) # np.sqrt(r1*r1 + ravg_kappa*ravg_kappa)
# result[3::3, 0] = t1
# result[3::3, 1] = r1
# print vertices[:6], result[:6], t0[:6], t1[:6], td[:6], td_scaled[:6], tkappa
# result = self.transform(result)
# return mpath.Path(result, codes)
# transform_path_non_affine = transform_path
|
__init__
|
*limits* is the view limit of the data. The only part of
its bounds that is used is ymax (for the radius maximum).
The theta range is always fixed to (0, 2pi).
|
import math
import warnings
import numpy as np
import matplotlib
rcParams = matplotlib.rcParams
from matplotlib.axes import Axes
import matplotlib.axis as maxis
from matplotlib import cbook
from matplotlib import docstring
from matplotlib.patches import Circle
from matplotlib.path import Path
from matplotlib.ticker import Formatter, Locator, FormatStrFormatter
from matplotlib.transforms import Affine2D, Affine2DBase, Bbox, \
BboxTransformTo, IdentityTransform, Transform, TransformWrapper, \
ScaledTranslation, blended_transform_factory, BboxTransformToMaxOnly
import matplotlib.spines as mspines
class PolarAxes(Axes):
"""
A polar graph projection, where the input dimensions are *theta*, *r*.
Theta starts pointing east and goes anti-clockwise.
"""
name = 'polar'
class PolarTransform(Transform):
"""
The base polar transform. This handles projection *theta* and
*r* into Cartesian coordinate space *x* and *y*, but does not
perform the ultimate affine transformation into the correct
position.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, axis=None, use_rmin=True):
Transform.__init__(self)
self._axis = axis
self._use_rmin = use_rmin
def transform(self, tr):
xy = np.empty(tr.shape, np.float_)
if self._axis is not None:
if self._use_rmin:
rmin = self._axis.viewLim.ymin
else:
rmin = 0
theta_offset = self._axis.get_theta_offset()
theta_direction = self._axis.get_theta_direction()
else:
rmin = 0
theta_offset = 0
theta_direction = 1
t = tr[:, 0:1]
r = tr[:, 1:2]
x = xy[:, 0:1]
y = xy[:, 1:2]
t *= theta_direction
t += theta_offset
if rmin != 0:
r = r - rmin
mask = r < 0
x[:] = np.where(mask, np.nan, r * np.cos(t))
y[:] = np.where(mask, np.nan, r * np.sin(t))
else:
x[:] = r * np.cos(t)
y[:] = r * np.sin(t)
return xy
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
if len(vertices) == 2 and vertices[0, 0] == vertices[1, 0]:
return Path(self.transform(vertices), path.codes)
ipath = path.interpolated(path._interpolation_steps)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return PolarAxes.InvertedPolarTransform(self._axis, self._use_rmin)
inverted.__doc__ = Transform.inverted.__doc__
class PolarAffine(Affine2DBase):
"""
The affine part of the polar projection. Scales the output so
that maximum radius rests on the edge of the axes circle.
"""
# MASKED: __init__ function (lines 101-111)
def get_matrix(self):
if self._invalid:
limits_scaled = self._limits.transformed(self._scale_transform)
yscale = limits_scaled.ymax - limits_scaled.ymin
affine = Affine2D() \
.scale(0.5 / yscale) \
.translate(0.5, 0.5)
self._mtx = affine.get_matrix()
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class InvertedPolarTransform(Transform):
"""
The inverse of the polar transform, mapping Cartesian
coordinate space *x* and *y* back to *theta* and *r*.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, axis=None, use_rmin=True):
Transform.__init__(self)
self._axis = axis
self._use_rmin = use_rmin
def transform(self, xy):
if self._axis is not None:
if self._use_rmin:
rmin = self._axis.viewLim.ymin
else:
rmin = 0
theta_offset = self._axis.get_theta_offset()
theta_direction = self._axis.get_theta_direction()
else:
rmin = 0
theta_offset = 0
theta_direction = 1
x = xy[:, 0:1]
y = xy[:, 1:]
r = np.sqrt(x*x + y*y)
theta = np.arccos(x / r)
theta = np.where(y < 0, 2 * np.pi - theta, theta)
theta -= theta_offset
theta *= theta_direction
r += rmin
return np.concatenate((theta, r), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return PolarAxes.PolarTransform(self._axis, self._use_rmin)
inverted.__doc__ = Transform.inverted.__doc__
class ThetaFormatter(Formatter):
"""
Used to format the *theta* tick labels. Converts the native
unit of radians into degrees and adds a degree symbol.
"""
def __call__(self, x, pos=None):
# \u00b0 : degree symbol
if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
return r"$%0.0f^\circ$" % ((x / np.pi) * 180.0)
else:
# we use unicode, rather than mathtext with \circ, so
# that it will work correctly with any arbitrary font
# (assuming it has a degree sign), whereas $5\circ$
# will only work correctly with one of the supported
# math fonts (Computer Modern and STIX)
return u"%0.0f\u00b0" % ((x / np.pi) * 180.0)
class RadialLocator(Locator):
"""
Used to locate radius ticks.
Ensures that all ticks are strictly positive. For all other
tasks, it delegates to the base
:class:`~matplotlib.ticker.Locator` (which may be different
        depending on the scale of the *r*-axis).
"""
def __init__(self, base):
self.base = base
def __call__(self):
ticks = self.base()
return [x for x in ticks if x > 0]
def autoscale(self):
return self.base.autoscale()
def pan(self, numsteps):
return self.base.pan(numsteps)
def zoom(self, direction):
return self.base.zoom(direction)
def refresh(self):
return self.base.refresh()
def view_limits(self, vmin, vmax):
vmin, vmax = self.base.view_limits(vmin, vmax)
return 0, vmax
def __init__(self, *args, **kwargs):
"""
Create a new Polar Axes for a polar plot.
The following optional kwargs are supported:
- *resolution*: The number of points of interpolation between
each pair of data points. Set to 1 to disable
interpolation.
"""
self.resolution = kwargs.pop('resolution', None)
if self.resolution not in (None, 1):
warnings.warn(
"""The resolution kwarg to Polar plots is now ignored.
If you need to interpolate data points, consider running
cbook.simple_linear_interpolation on the data before passing to matplotlib.""")
Axes.__init__(self, *args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.cla()
__init__.__doc__ = Axes.__init__.__doc__
def cla(self):
Axes.cla(self)
self.title.set_y(1.05)
self.xaxis.set_major_formatter(self.ThetaFormatter())
self.xaxis.isDefault_majfmt = True
angles = np.arange(0.0, 360.0, 45.0)
self.set_thetagrids(angles)
self.yaxis.set_major_locator(self.RadialLocator(self.yaxis.get_major_locator()))
self.grid(rcParams['polaraxes.grid'])
self.xaxis.set_ticks_position('none')
self.yaxis.set_ticks_position('none')
self.yaxis.set_tick_params(label1On=True)
# Why do we need to turn on yaxis tick labels, but
# xaxis tick labels are already on?
self.set_theta_offset(0)
self.set_theta_direction(1)
def _init_axis(self):
"move this out of __init__ because non-separable axes don't use it"
self.xaxis = maxis.XAxis(self)
self.yaxis = maxis.YAxis(self)
        # Calling polar_axes.xaxis.cla() or polar_axes.yaxis.cla()
# results in weird artifacts. Therefore we disable this for
# now.
# self.spines['polar'].register_axis(self.yaxis)
self._update_transScale()
def _set_lim_and_transforms(self):
self.transAxes = BboxTransformTo(self.bbox)
# Transforms the x and y axis separately by a scale factor
# It is assumed that this part will have non-linear components
self.transScale = TransformWrapper(IdentityTransform())
# A (possibly non-linear) projection on the (already scaled)
# data. This one is aware of rmin
self.transProjection = self.PolarTransform(self)
# This one is not aware of rmin
self.transPureProjection = self.PolarTransform(self, use_rmin=False)
# An affine transformation on the data, generally to limit the
# range of the axes
self.transProjectionAffine = self.PolarAffine(self.transScale, self.viewLim)
# The complete data transformation stack -- from data all the
# way to display coordinates
self.transData = self.transScale + self.transProjection + \
(self.transProjectionAffine + self.transAxes)
# This is the transform for theta-axis ticks. It is
# equivalent to transData, except it always puts r == 1.0 at
# the edge of the axis circle.
self._xaxis_transform = (
self.transPureProjection +
self.PolarAffine(IdentityTransform(), Bbox.unit()) +
self.transAxes)
# The theta labels are moved from radius == 0.0 to radius == 1.1
self._theta_label1_position = Affine2D().translate(0.0, 1.1)
self._xaxis_text1_transform = (
self._theta_label1_position +
self._xaxis_transform)
self._theta_label2_position = Affine2D().translate(0.0, 1.0 / 1.1)
self._xaxis_text2_transform = (
self._theta_label2_position +
self._xaxis_transform)
        # This is the transform for r-axis ticks. It scales the theta
        # axis so that gridlines spanning 0.0 to 1.0 in data space now
        # span 0.0 to 2pi.
self._yaxis_transform = (
Affine2D().scale(np.pi * 2.0, 1.0) +
self.transData)
# The r-axis labels are put at an angle and padded in the r-direction
self._r_label_position = ScaledTranslation(
22.5, 0.0, Affine2D())
self._yaxis_text_transform = (
self._r_label_position +
Affine2D().scale(1.0 / 360.0, 1.0) +
self._yaxis_transform
)
def get_xaxis_transform(self,which='grid'):
assert which in ['tick1','tick2','grid']
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad):
return self._xaxis_text1_transform, 'center', 'center'
def get_xaxis_text2_transform(self, pad):
return self._xaxis_text2_transform, 'center', 'center'
def get_yaxis_transform(self,which='grid'):
assert which in ['tick1','tick2','grid']
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad):
angle = self._r_label_position.to_values()[4]
if angle < 90.:
return self._yaxis_text_transform, 'bottom', 'left'
elif angle < 180.:
return self._yaxis_text_transform, 'bottom', 'right'
elif angle < 270.:
return self._yaxis_text_transform, 'top', 'right'
else:
return self._yaxis_text_transform, 'top', 'left'
def get_yaxis_text2_transform(self, pad):
angle = self._r_label_position.to_values()[4]
if angle < 90.:
return self._yaxis_text_transform, 'top', 'right'
elif angle < 180.:
return self._yaxis_text_transform, 'top', 'left'
elif angle < 270.:
return self._yaxis_text_transform, 'bottom', 'left'
else:
return self._yaxis_text_transform, 'bottom', 'right'
def _gen_axes_patch(self):
return Circle((0.5, 0.5), 0.5)
def _gen_axes_spines(self):
return {'polar':mspines.Spine.circular_spine(self,
(0.5, 0.5), 0.5)}
def set_rmax(self, rmax):
self.viewLim.y1 = rmax
def get_rmax(self):
return self.viewLim.ymax
def set_rmin(self, rmin):
self.viewLim.y0 = rmin
def get_rmin(self):
return self.viewLim.ymin
def set_theta_offset(self, offset):
"""
Set the offset for the location of 0 in radians.
"""
self._theta_offset = offset
def get_theta_offset(self):
"""
Get the offset for the location of 0 in radians.
"""
return self._theta_offset
def set_theta_zero_location(self, loc):
"""
Sets the location of theta's zero. (Calls set_theta_offset
with the correct value in radians under the hood.)
May be one of "N", "NW", "W", "SW", "S", "SE", "E", or "NE".
"""
mapping = {
'N': np.pi * 0.5,
'NW': np.pi * 0.75,
'W': np.pi,
'SW': np.pi * 1.25,
'S': np.pi * 1.5,
'SE': np.pi * 1.75,
'E': 0,
'NE': np.pi * 0.25 }
return self.set_theta_offset(mapping[loc])
def set_theta_direction(self, direction):
"""
Set the direction in which theta increases.
clockwise, -1:
Theta increases in the clockwise direction
counterclockwise, anticlockwise, 1:
Theta increases in the counterclockwise direction
"""
if direction in ('clockwise',):
self._direction = -1
elif direction in ('counterclockwise', 'anticlockwise'):
self._direction = 1
elif direction in (1, -1):
self._direction = direction
else:
raise ValueError("direction must be 1, -1, clockwise or counterclockwise")
def get_theta_direction(self):
"""
Get the direction in which theta increases.
-1:
Theta increases in the clockwise direction
1:
Theta increases in the counterclockwise direction
"""
return self._direction
def set_rlim(self, *args, **kwargs):
if 'rmin' in kwargs:
kwargs['ymin'] = kwargs.pop('rmin')
if 'rmax' in kwargs:
kwargs['ymax'] = kwargs.pop('rmax')
return self.set_ylim(*args, **kwargs)
def set_yscale(self, *args, **kwargs):
Axes.set_yscale(self, *args, **kwargs)
self.yaxis.set_major_locator(
self.RadialLocator(self.yaxis.get_major_locator()))
set_rscale = Axes.set_yscale
set_rticks = Axes.set_yticks
@docstring.dedent_interpd
def set_thetagrids(self, angles, labels=None, frac=None, fmt=None,
**kwargs):
"""
Set the angles at which to place the theta grids (these
gridlines are equal along the theta dimension). *angles* is in
degrees.
*labels*, if not None, is a ``len(angles)`` list of strings of
the labels to use at each angle.
If *labels* is None, the labels will be ``fmt %% angle``
        *frac* is the fraction of the polar axes radius at which to
        place the label (1 is the edge). E.g., 1.05 is outside the axes
        and 0.95 is inside the axes.
        Return value is a tuple (*lines*, *labels*), where *lines* is a
        list of :class:`~matplotlib.lines.Line2D` instances and *labels*
        is a list of :class:`~matplotlib.text.Text` instances.
kwargs are optional text properties for the labels:
%(Text)s
ACCEPTS: sequence of floats
"""
angles = np.asarray(angles, np.float_)
self.set_xticks(angles * (np.pi / 180.0))
if labels is not None:
self.set_xticklabels(labels)
elif fmt is not None:
self.xaxis.set_major_formatter(FormatStrFormatter(fmt))
if frac is not None:
self._theta_label1_position.clear().translate(0.0, frac)
self._theta_label2_position.clear().translate(0.0, 1.0 / frac)
for t in self.xaxis.get_ticklabels():
t.update(kwargs)
return self.xaxis.get_ticklines(), self.xaxis.get_ticklabels()
@docstring.dedent_interpd
def set_rgrids(self, radii, labels=None, angle=None, fmt=None,
**kwargs):
"""
Set the radial locations and labels of the *r* grids.
The labels will appear at radial distances *radii* at the
given *angle* in degrees.
*labels*, if not None, is a ``len(radii)`` list of strings of the
labels to use at each radius.
If *labels* is None, the built-in formatter will be used.
        Return value is a tuple (*lines*, *labels*), where *lines* is a
        list of :class:`~matplotlib.lines.Line2D` instances and *labels*
        is a list of :class:`~matplotlib.text.Text` instances.
kwargs are optional text properties for the labels:
%(Text)s
ACCEPTS: sequence of floats
"""
radii = np.asarray(radii)
rmin = radii.min()
if rmin <= 0:
raise ValueError('radial grids must be strictly positive')
self.set_yticks(radii)
if labels is not None:
self.set_yticklabels(labels)
elif fmt is not None:
self.yaxis.set_major_formatter(FormatStrFormatter(fmt))
if angle is None:
angle = self._r_label_position.to_values()[4]
self._r_label_position._t = (angle, 0.0)
self._r_label_position.invalidate()
for t in self.yaxis.get_ticklabels():
t.update(kwargs)
return self.yaxis.get_gridlines(), self.yaxis.get_ticklabels()
def set_xscale(self, scale, *args, **kwargs):
if scale != 'linear':
            raise NotImplementedError("You cannot set the xscale on a polar plot.")
    def set_xlim(self, *args, **kwargs):
# The xlim is fixed, no matter what you do
self.viewLim.intervalx = (0.0, np.pi * 2.0)
def format_coord(self, theta, r):
"""
Return a format string formatting the coordinate using Unicode
characters.
"""
theta /= math.pi
# \u03b8: lower-case theta
# \u03c0: lower-case pi
# \u00b0: degree symbol
return u'\u03b8=%0.3f\u03c0 (%0.3f\u00b0), r=%0.3f' % (theta, theta * 180.0, r)
def get_data_ratio(self):
'''
Return the aspect ratio of the data itself. For a polar plot,
this should always be 1.0
'''
return 1.0
### Interactive panning
def can_zoom(self):
"""
Return *True* if this axes supports the zoom box button functionality.
Polar axes do not support zoom boxes.
"""
return False
    def can_pan(self):
"""
Return *True* if this axes supports the pan/zoom button functionality.
For polar axes, this is slightly misleading. Both panning and
zooming are performed by the same button. Panning is performed
in azimuth while zooming is done along the radial.
"""
return True
def start_pan(self, x, y, button):
angle = np.deg2rad(self._r_label_position.to_values()[4])
mode = ''
if button == 1:
epsilon = np.pi / 45.0
t, r = self.transData.inverted().transform_point((x, y))
if t >= angle - epsilon and t <= angle + epsilon:
mode = 'drag_r_labels'
elif button == 3:
mode = 'zoom'
self._pan_start = cbook.Bunch(
rmax = self.get_rmax(),
trans = self.transData.frozen(),
trans_inverse = self.transData.inverted().frozen(),
r_label_angle = self._r_label_position.to_values()[4],
x = x,
y = y,
mode = mode
)
def end_pan(self):
del self._pan_start
def drag_pan(self, button, key, x, y):
p = self._pan_start
if p.mode == 'drag_r_labels':
startt, startr = p.trans_inverse.transform_point((p.x, p.y))
t, r = p.trans_inverse.transform_point((x, y))
# Deal with theta
dt0 = t - startt
dt1 = startt - t
if abs(dt1) < abs(dt0):
                dt = abs(dt1) * np.sign(dt0) * -1.0
else:
dt = dt0 * -1.0
dt = (dt / np.pi) * 180.0
self._r_label_position._t = (p.r_label_angle - dt, 0.0)
self._r_label_position.invalidate()
trans, vert1, horiz1 = self.get_yaxis_text1_transform(0.0)
trans, vert2, horiz2 = self.get_yaxis_text2_transform(0.0)
for t in self.yaxis.majorTicks + self.yaxis.minorTicks:
t.label1.set_va(vert1)
t.label1.set_ha(horiz1)
t.label2.set_va(vert2)
t.label2.set_ha(horiz2)
elif p.mode == 'zoom':
startt, startr = p.trans_inverse.transform_point((p.x, p.y))
t, r = p.trans_inverse.transform_point((x, y))
dr = r - startr
# Deal with r
scale = r / startr
self.set_rmax(p.rmax / scale)
# These are a couple of aborted attempts to project a polar plot using
# cubic bezier curves.
# def transform_path(self, path):
# twopi = 2.0 * np.pi
# halfpi = 0.5 * np.pi
# vertices = path.vertices
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# maxtd = td.max()
# interpolate = np.ceil(maxtd / halfpi)
# if interpolate > 1.0:
# vertices = self.interpolate(vertices, interpolate)
# vertices = self.transform(vertices)
# result = np.zeros((len(vertices) * 3 - 2, 2), np.float_)
# codes = mpath.Path.CURVE4 * np.ones((len(vertices) * 3 - 2, ), mpath.Path.code_type)
# result[0] = vertices[0]
# codes[0] = mpath.Path.MOVETO
# kappa = 4.0 * ((np.sqrt(2.0) - 1.0) / 3.0)
# kappa = 0.5
# p0 = vertices[0:-1]
# p1 = vertices[1: ]
# x0 = p0[:, 0:1]
# y0 = p0[:, 1: ]
# b0 = ((y0 - x0) - y0) / ((x0 + y0) - x0)
# a0 = y0 - b0*x0
# x1 = p1[:, 0:1]
# y1 = p1[:, 1: ]
# b1 = ((y1 - x1) - y1) / ((x1 + y1) - x1)
# a1 = y1 - b1*x1
# x = -(a0-a1) / (b0-b1)
# y = a0 + b0*x
# xk = (x - x0) * kappa + x0
# yk = (y - y0) * kappa + y0
# result[1::3, 0:1] = xk
# result[1::3, 1: ] = yk
# xk = (x - x1) * kappa + x1
# yk = (y - y1) * kappa + y1
# result[2::3, 0:1] = xk
# result[2::3, 1: ] = yk
# result[3::3] = p1
# print vertices[-2:]
# print result[-2:]
# return mpath.Path(result, codes)
# twopi = 2.0 * np.pi
# halfpi = 0.5 * np.pi
# vertices = path.vertices
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# maxtd = td.max()
# interpolate = np.ceil(maxtd / halfpi)
# print "interpolate", interpolate
# if interpolate > 1.0:
# vertices = self.interpolate(vertices, interpolate)
# result = np.zeros((len(vertices) * 3 - 2, 2), np.float_)
# codes = mpath.Path.CURVE4 * np.ones((len(vertices) * 3 - 2, ), mpath.Path.code_type)
# result[0] = vertices[0]
# codes[0] = mpath.Path.MOVETO
# kappa = 4.0 * ((np.sqrt(2.0) - 1.0) / 3.0)
# tkappa = np.arctan(kappa)
# hyp_kappa = np.sqrt(kappa*kappa + 1.0)
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# r0 = vertices[0:-1, 1]
# r1 = vertices[1: , 1]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# td_scaled = td / (np.pi * 0.5)
# rd = r1 - r0
# r0kappa = r0 * kappa * td_scaled
# r1kappa = r1 * kappa * td_scaled
# ravg_kappa = ((r1 + r0) / 2.0) * kappa * td_scaled
# result[1::3, 0] = t0 + (tkappa * td_scaled)
# result[1::3, 1] = r0*hyp_kappa
# # result[1::3, 1] = r0 / np.cos(tkappa * td_scaled) # np.sqrt(r0*r0 + ravg_kappa*ravg_kappa)
# result[2::3, 0] = t1 - (tkappa * td_scaled)
# result[2::3, 1] = r1*hyp_kappa
# # result[2::3, 1] = r1 / np.cos(tkappa * td_scaled) # np.sqrt(r1*r1 + ravg_kappa*ravg_kappa)
# result[3::3, 0] = t1
# result[3::3, 1] = r1
# print vertices[:6], result[:6], t0[:6], t1[:6], td[:6], td_scaled[:6], tkappa
# result = self.transform(result)
# return mpath.Path(result, codes)
# transform_path_non_affine = transform_path
|
def __init__(self, scale_transform, limits):
"""
*limits* is the view limit of the data. The only part of
its bounds that is used is ymax (for the radius maximum).
The theta range is always fixed to (0, 2pi).
"""
Affine2DBase.__init__(self)
self._scale_transform = scale_transform
self._limits = limits
self.set_children(scale_transform, limits)
self._mtx = None
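        # ``_mtx`` stays None until ``get_matrix`` lazily (re)computes it;
        # registering ``scale_transform`` and ``limits`` as children above
        # ensures the cached matrix is invalidated whenever either changes.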
| 101
| 111
|
import math
import warnings
import numpy as np
import matplotlib
rcParams = matplotlib.rcParams
from matplotlib.axes import Axes
import matplotlib.axis as maxis
from matplotlib import cbook
from matplotlib import docstring
from matplotlib.patches import Circle
from matplotlib.path import Path
from matplotlib.ticker import Formatter, Locator, FormatStrFormatter
from matplotlib.transforms import Affine2D, Affine2DBase, Bbox, \
BboxTransformTo, IdentityTransform, Transform, TransformWrapper, \
ScaledTranslation, blended_transform_factory, BboxTransformToMaxOnly
import matplotlib.spines as mspines
class PolarAxes(Axes):
"""
A polar graph projection, where the input dimensions are *theta*, *r*.
Theta starts pointing east and goes anti-clockwise.
"""
name = 'polar'
class PolarTransform(Transform):
"""
The base polar transform. This handles projection *theta* and
*r* into Cartesian coordinate space *x* and *y*, but does not
perform the ultimate affine transformation into the correct
position.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, axis=None, use_rmin=True):
Transform.__init__(self)
self._axis = axis
self._use_rmin = use_rmin
def transform(self, tr):
xy = np.empty(tr.shape, np.float_)
if self._axis is not None:
if self._use_rmin:
rmin = self._axis.viewLim.ymin
else:
rmin = 0
theta_offset = self._axis.get_theta_offset()
theta_direction = self._axis.get_theta_direction()
else:
rmin = 0
theta_offset = 0
theta_direction = 1
t = tr[:, 0:1]
r = tr[:, 1:2]
x = xy[:, 0:1]
y = xy[:, 1:2]
t *= theta_direction
t += theta_offset
if rmin != 0:
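                # Shift radii by rmin and NaN-out any point that falls
                # inside the rmin hole so it is not drawn.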
r = r - rmin
mask = r < 0
x[:] = np.where(mask, np.nan, r * np.cos(t))
y[:] = np.where(mask, np.nan, r * np.sin(t))
else:
x[:] = r * np.cos(t)
y[:] = r * np.sin(t)
return xy
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
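            # Fast path: a two-vertex segment with equal theta is purely
            # radial, so it stays a straight line and needs no interpolation.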
if len(vertices) == 2 and vertices[0, 0] == vertices[1, 0]:
return Path(self.transform(vertices), path.codes)
ipath = path.interpolated(path._interpolation_steps)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return PolarAxes.InvertedPolarTransform(self._axis, self._use_rmin)
inverted.__doc__ = Transform.inverted.__doc__
class PolarAffine(Affine2DBase):
"""
The affine part of the polar projection. Scales the output so
that maximum radius rests on the edge of the axes circle.
"""
def __init__(self, scale_transform, limits):
"""
*limits* is the view limit of the data. The only part of
its bounds that is used is ymax (for the radius maximum).
The theta range is always fixed to (0, 2pi).
"""
Affine2DBase.__init__(self)
self._scale_transform = scale_transform
self._limits = limits
self.set_children(scale_transform, limits)
self._mtx = None
def get_matrix(self):
if self._invalid:
limits_scaled = self._limits.transformed(self._scale_transform)
yscale = limits_scaled.ymax - limits_scaled.ymin
affine = Affine2D() \
.scale(0.5 / yscale) \
.translate(0.5, 0.5)
self._mtx = affine.get_matrix()
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class InvertedPolarTransform(Transform):
"""
The inverse of the polar transform, mapping Cartesian
coordinate space *x* and *y* back to *theta* and *r*.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, axis=None, use_rmin=True):
Transform.__init__(self)
self._axis = axis
self._use_rmin = use_rmin
def transform(self, xy):
if self._axis is not None:
if self._use_rmin:
rmin = self._axis.viewLim.ymin
else:
rmin = 0
theta_offset = self._axis.get_theta_offset()
theta_direction = self._axis.get_theta_direction()
else:
rmin = 0
theta_offset = 0
theta_direction = 1
x = xy[:, 0:1]
y = xy[:, 1:]
r = np.sqrt(x*x + y*y)
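            # arccos yields values in [0, pi]; reflect points with y < 0
            # to recover theta over the full [0, 2*pi) range.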
theta = np.arccos(x / r)
theta = np.where(y < 0, 2 * np.pi - theta, theta)
theta -= theta_offset
theta *= theta_direction
r += rmin
return np.concatenate((theta, r), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return PolarAxes.PolarTransform(self._axis, self._use_rmin)
inverted.__doc__ = Transform.inverted.__doc__
class ThetaFormatter(Formatter):
"""
Used to format the *theta* tick labels. Converts the native
unit of radians into degrees and adds a degree symbol.
"""
def __call__(self, x, pos=None):
# \u00b0 : degree symbol
if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
return r"$%0.0f^\circ$" % ((x / np.pi) * 180.0)
else:
# we use unicode, rather than mathtext with \circ, so
# that it will work correctly with any arbitrary font
# (assuming it has a degree sign), whereas $5\circ$
# will only work correctly with one of the supported
# math fonts (Computer Modern and STIX)
return u"%0.0f\u00b0" % ((x / np.pi) * 180.0)
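                # E.g. x = pi/2 (a quarter turn) is rendered as u"90\u00b0".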
class RadialLocator(Locator):
"""
Used to locate radius ticks.
Ensures that all ticks are strictly positive. For all other
tasks, it delegates to the base
:class:`~matplotlib.ticker.Locator` (which may be different
        depending on the scale of the *r*-axis).
"""
def __init__(self, base):
self.base = base
def __call__(self):
ticks = self.base()
return [x for x in ticks if x > 0]
def autoscale(self):
return self.base.autoscale()
def pan(self, numsteps):
return self.base.pan(numsteps)
def zoom(self, direction):
return self.base.zoom(direction)
def refresh(self):
return self.base.refresh()
def view_limits(self, vmin, vmax):
vmin, vmax = self.base.view_limits(vmin, vmax)
return 0, vmax
def __init__(self, *args, **kwargs):
"""
Create a new Polar Axes for a polar plot.
The following optional kwargs are supported:
- *resolution*: The number of points of interpolation between
each pair of data points. Set to 1 to disable
interpolation.
"""
self.resolution = kwargs.pop('resolution', None)
if self.resolution not in (None, 1):
warnings.warn(
"""The resolution kwarg to Polar plots is now ignored.
If you need to interpolate data points, consider running
cbook.simple_linear_interpolation on the data before passing to matplotlib.""")
Axes.__init__(self, *args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.cla()
__init__.__doc__ = Axes.__init__.__doc__
def cla(self):
Axes.cla(self)
self.title.set_y(1.05)
self.xaxis.set_major_formatter(self.ThetaFormatter())
self.xaxis.isDefault_majfmt = True
angles = np.arange(0.0, 360.0, 45.0)
self.set_thetagrids(angles)
self.yaxis.set_major_locator(self.RadialLocator(self.yaxis.get_major_locator()))
self.grid(rcParams['polaraxes.grid'])
self.xaxis.set_ticks_position('none')
self.yaxis.set_ticks_position('none')
self.yaxis.set_tick_params(label1On=True)
# Why do we need to turn on yaxis tick labels, but
# xaxis tick labels are already on?
self.set_theta_offset(0)
self.set_theta_direction(1)
def _init_axis(self):
"move this out of __init__ because non-separable axes don't use it"
self.xaxis = maxis.XAxis(self)
self.yaxis = maxis.YAxis(self)
        # Calling polar_axes.xaxis.cla() or polar_axes.yaxis.cla()
# results in weird artifacts. Therefore we disable this for
# now.
# self.spines['polar'].register_axis(self.yaxis)
self._update_transScale()
def _set_lim_and_transforms(self):
self.transAxes = BboxTransformTo(self.bbox)
# Transforms the x and y axis separately by a scale factor
# It is assumed that this part will have non-linear components
self.transScale = TransformWrapper(IdentityTransform())
# A (possibly non-linear) projection on the (already scaled)
# data. This one is aware of rmin
self.transProjection = self.PolarTransform(self)
# This one is not aware of rmin
self.transPureProjection = self.PolarTransform(self, use_rmin=False)
# An affine transformation on the data, generally to limit the
# range of the axes
self.transProjectionAffine = self.PolarAffine(self.transScale, self.viewLim)
# The complete data transformation stack -- from data all the
# way to display coordinates
self.transData = self.transScale + self.transProjection + \
(self.transProjectionAffine + self.transAxes)
# This is the transform for theta-axis ticks. It is
# equivalent to transData, except it always puts r == 1.0 at
# the edge of the axis circle.
self._xaxis_transform = (
self.transPureProjection +
self.PolarAffine(IdentityTransform(), Bbox.unit()) +
self.transAxes)
# The theta labels are moved from radius == 0.0 to radius == 1.1
self._theta_label1_position = Affine2D().translate(0.0, 1.1)
self._xaxis_text1_transform = (
self._theta_label1_position +
self._xaxis_transform)
self._theta_label2_position = Affine2D().translate(0.0, 1.0 / 1.1)
self._xaxis_text2_transform = (
self._theta_label2_position +
self._xaxis_transform)
        # This is the transform for r-axis ticks. It scales the theta
        # axis so that gridlines spanning 0.0 to 1.0 in data space now
        # span 0.0 to 2pi.
self._yaxis_transform = (
Affine2D().scale(np.pi * 2.0, 1.0) +
self.transData)
# The r-axis labels are put at an angle and padded in the r-direction
self._r_label_position = ScaledTranslation(
22.5, 0.0, Affine2D())
self._yaxis_text_transform = (
self._r_label_position +
Affine2D().scale(1.0 / 360.0, 1.0) +
self._yaxis_transform
)
def get_xaxis_transform(self,which='grid'):
assert which in ['tick1','tick2','grid']
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad):
return self._xaxis_text1_transform, 'center', 'center'
def get_xaxis_text2_transform(self, pad):
return self._xaxis_text2_transform, 'center', 'center'
def get_yaxis_transform(self,which='grid'):
assert which in ['tick1','tick2','grid']
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad):
angle = self._r_label_position.to_values()[4]
if angle < 90.:
return self._yaxis_text_transform, 'bottom', 'left'
elif angle < 180.:
return self._yaxis_text_transform, 'bottom', 'right'
elif angle < 270.:
return self._yaxis_text_transform, 'top', 'right'
else:
return self._yaxis_text_transform, 'top', 'left'
def get_yaxis_text2_transform(self, pad):
angle = self._r_label_position.to_values()[4]
if angle < 90.:
return self._yaxis_text_transform, 'top', 'right'
elif angle < 180.:
return self._yaxis_text_transform, 'top', 'left'
elif angle < 270.:
return self._yaxis_text_transform, 'bottom', 'left'
else:
return self._yaxis_text_transform, 'bottom', 'right'
def _gen_axes_patch(self):
return Circle((0.5, 0.5), 0.5)
def _gen_axes_spines(self):
return {'polar':mspines.Spine.circular_spine(self,
(0.5, 0.5), 0.5)}
def set_rmax(self, rmax):
self.viewLim.y1 = rmax
def get_rmax(self):
return self.viewLim.ymax
def set_rmin(self, rmin):
self.viewLim.y0 = rmin
def get_rmin(self):
return self.viewLim.ymin
def set_theta_offset(self, offset):
"""
Set the offset for the location of 0 in radians.
"""
self._theta_offset = offset
def get_theta_offset(self):
"""
Get the offset for the location of 0 in radians.
"""
return self._theta_offset
def set_theta_zero_location(self, loc):
"""
Sets the location of theta's zero. (Calls set_theta_offset
with the correct value in radians under the hood.)
May be one of "N", "NW", "W", "SW", "S", "SE", "E", or "NE".
"""
mapping = {
'N': np.pi * 0.5,
'NW': np.pi * 0.75,
'W': np.pi,
'SW': np.pi * 1.25,
'S': np.pi * 1.5,
'SE': np.pi * 1.75,
'E': 0,
'NE': np.pi * 0.25 }
return self.set_theta_offset(mapping[loc])
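    # Example (editor's illustration, not part of the original source):
    #     ax = plt.subplot(111, projection='polar')
    #     ax.set_theta_zero_location('N')   # theta = 0 now points up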
def set_theta_direction(self, direction):
"""
Set the direction in which theta increases.
clockwise, -1:
Theta increases in the clockwise direction
counterclockwise, anticlockwise, 1:
Theta increases in the counterclockwise direction
"""
if direction in ('clockwise',):
self._direction = -1
elif direction in ('counterclockwise', 'anticlockwise'):
self._direction = 1
elif direction in (1, -1):
self._direction = direction
else:
raise ValueError("direction must be 1, -1, clockwise or counterclockwise")
def get_theta_direction(self):
"""
Get the direction in which theta increases.
-1:
Theta increases in the clockwise direction
1:
Theta increases in the counterclockwise direction
"""
return self._direction
def set_rlim(self, *args, **kwargs):
if 'rmin' in kwargs:
kwargs['ymin'] = kwargs.pop('rmin')
if 'rmax' in kwargs:
kwargs['ymax'] = kwargs.pop('rmax')
return self.set_ylim(*args, **kwargs)
def set_yscale(self, *args, **kwargs):
Axes.set_yscale(self, *args, **kwargs)
self.yaxis.set_major_locator(
self.RadialLocator(self.yaxis.get_major_locator()))
set_rscale = Axes.set_yscale
set_rticks = Axes.set_yticks
@docstring.dedent_interpd
def set_thetagrids(self, angles, labels=None, frac=None, fmt=None,
**kwargs):
"""
Set the angles at which to place the theta grids (these
gridlines are equal along the theta dimension). *angles* is in
degrees.
*labels*, if not None, is a ``len(angles)`` list of strings of
the labels to use at each angle.
If *labels* is None, the labels will be ``fmt %% angle``
        *frac* is the fraction of the polar axes radius at which to
        place the label (1 is the edge). E.g., 1.05 is outside the axes
        and 0.95 is inside the axes.
        Return value is a tuple (*lines*, *labels*), where *lines* is a
        list of :class:`~matplotlib.lines.Line2D` instances and *labels*
        is a list of :class:`~matplotlib.text.Text` instances.
kwargs are optional text properties for the labels:
%(Text)s
ACCEPTS: sequence of floats
"""
angles = np.asarray(angles, np.float_)
self.set_xticks(angles * (np.pi / 180.0))
if labels is not None:
self.set_xticklabels(labels)
elif fmt is not None:
self.xaxis.set_major_formatter(FormatStrFormatter(fmt))
if frac is not None:
self._theta_label1_position.clear().translate(0.0, frac)
self._theta_label2_position.clear().translate(0.0, 1.0 / frac)
for t in self.xaxis.get_ticklabels():
t.update(kwargs)
return self.xaxis.get_ticklines(), self.xaxis.get_ticklabels()
@docstring.dedent_interpd
def set_rgrids(self, radii, labels=None, angle=None, fmt=None,
**kwargs):
"""
Set the radial locations and labels of the *r* grids.
The labels will appear at radial distances *radii* at the
given *angle* in degrees.
*labels*, if not None, is a ``len(radii)`` list of strings of the
labels to use at each radius.
If *labels* is None, the built-in formatter will be used.
        Return value is a tuple (*lines*, *labels*), where *lines* is a
        list of :class:`~matplotlib.lines.Line2D` instances and *labels*
        is a list of :class:`~matplotlib.text.Text` instances.
kwargs are optional text properties for the labels:
%(Text)s
ACCEPTS: sequence of floats
"""
radii = np.asarray(radii)
rmin = radii.min()
if rmin <= 0:
raise ValueError('radial grids must be strictly positive')
self.set_yticks(radii)
if labels is not None:
self.set_yticklabels(labels)
elif fmt is not None:
self.yaxis.set_major_formatter(FormatStrFormatter(fmt))
if angle is None:
angle = self._r_label_position.to_values()[4]
self._r_label_position._t = (angle, 0.0)
self._r_label_position.invalidate()
for t in self.yaxis.get_ticklabels():
t.update(kwargs)
return self.yaxis.get_gridlines(), self.yaxis.get_ticklabels()
def set_xscale(self, scale, *args, **kwargs):
if scale != 'linear':
            raise NotImplementedError("You cannot set the xscale on a polar plot.")
    def set_xlim(self, *args, **kwargs):
# The xlim is fixed, no matter what you do
self.viewLim.intervalx = (0.0, np.pi * 2.0)
def format_coord(self, theta, r):
"""
Return a format string formatting the coordinate using Unicode
characters.
"""
theta /= math.pi
# \u03b8: lower-case theta
# \u03c0: lower-case pi
# \u00b0: degree symbol
return u'\u03b8=%0.3f\u03c0 (%0.3f\u00b0), r=%0.3f' % (theta, theta * 180.0, r)
def get_data_ratio(self):
'''
Return the aspect ratio of the data itself. For a polar plot,
this should always be 1.0
'''
return 1.0
### Interactive panning
def can_zoom(self):
"""
Return *True* if this axes supports the zoom box button functionality.
Polar axes do not support zoom boxes.
"""
return False
    def can_pan(self):
"""
Return *True* if this axes supports the pan/zoom button functionality.
For polar axes, this is slightly misleading. Both panning and
zooming are performed by the same button. Panning is performed
in azimuth while zooming is done along the radial.
"""
return True
def start_pan(self, x, y, button):
angle = np.deg2rad(self._r_label_position.to_values()[4])
mode = ''
if button == 1:
epsilon = np.pi / 45.0
t, r = self.transData.inverted().transform_point((x, y))
if t >= angle - epsilon and t <= angle + epsilon:
mode = 'drag_r_labels'
elif button == 3:
mode = 'zoom'
self._pan_start = cbook.Bunch(
rmax = self.get_rmax(),
trans = self.transData.frozen(),
trans_inverse = self.transData.inverted().frozen(),
r_label_angle = self._r_label_position.to_values()[4],
x = x,
y = y,
mode = mode
)
def end_pan(self):
del self._pan_start
def drag_pan(self, button, key, x, y):
p = self._pan_start
if p.mode == 'drag_r_labels':
startt, startr = p.trans_inverse.transform_point((p.x, p.y))
t, r = p.trans_inverse.transform_point((x, y))
# Deal with theta
dt0 = t - startt
dt1 = startt - t
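            # Pick the smaller angular displacement so that dragging across
            # the 0/2*pi seam does not make the r-labels jump.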
if abs(dt1) < abs(dt0):
                dt = abs(dt1) * np.sign(dt0) * -1.0
else:
dt = dt0 * -1.0
dt = (dt / np.pi) * 180.0
self._r_label_position._t = (p.r_label_angle - dt, 0.0)
self._r_label_position.invalidate()
trans, vert1, horiz1 = self.get_yaxis_text1_transform(0.0)
trans, vert2, horiz2 = self.get_yaxis_text2_transform(0.0)
for t in self.yaxis.majorTicks + self.yaxis.minorTicks:
t.label1.set_va(vert1)
t.label1.set_ha(horiz1)
t.label2.set_va(vert2)
t.label2.set_ha(horiz2)
elif p.mode == 'zoom':
startt, startr = p.trans_inverse.transform_point((p.x, p.y))
t, r = p.trans_inverse.transform_point((x, y))
dr = r - startr
# Deal with r
scale = r / startr
self.set_rmax(p.rmax / scale)
# These are a couple of aborted attempts to project a polar plot using
# cubic bezier curves.
# def transform_path(self, path):
# twopi = 2.0 * np.pi
# halfpi = 0.5 * np.pi
# vertices = path.vertices
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# maxtd = td.max()
# interpolate = np.ceil(maxtd / halfpi)
# if interpolate > 1.0:
# vertices = self.interpolate(vertices, interpolate)
# vertices = self.transform(vertices)
# result = np.zeros((len(vertices) * 3 - 2, 2), np.float_)
# codes = mpath.Path.CURVE4 * np.ones((len(vertices) * 3 - 2, ), mpath.Path.code_type)
# result[0] = vertices[0]
# codes[0] = mpath.Path.MOVETO
# kappa = 4.0 * ((np.sqrt(2.0) - 1.0) / 3.0)
# kappa = 0.5
# p0 = vertices[0:-1]
# p1 = vertices[1: ]
# x0 = p0[:, 0:1]
# y0 = p0[:, 1: ]
# b0 = ((y0 - x0) - y0) / ((x0 + y0) - x0)
# a0 = y0 - b0*x0
# x1 = p1[:, 0:1]
# y1 = p1[:, 1: ]
# b1 = ((y1 - x1) - y1) / ((x1 + y1) - x1)
# a1 = y1 - b1*x1
# x = -(a0-a1) / (b0-b1)
# y = a0 + b0*x
# xk = (x - x0) * kappa + x0
# yk = (y - y0) * kappa + y0
# result[1::3, 0:1] = xk
# result[1::3, 1: ] = yk
# xk = (x - x1) * kappa + x1
# yk = (y - y1) * kappa + y1
# result[2::3, 0:1] = xk
# result[2::3, 1: ] = yk
# result[3::3] = p1
# print vertices[-2:]
# print result[-2:]
# return mpath.Path(result, codes)
# twopi = 2.0 * np.pi
# halfpi = 0.5 * np.pi
# vertices = path.vertices
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# maxtd = td.max()
# interpolate = np.ceil(maxtd / halfpi)
# print "interpolate", interpolate
# if interpolate > 1.0:
# vertices = self.interpolate(vertices, interpolate)
# result = np.zeros((len(vertices) * 3 - 2, 2), np.float_)
# codes = mpath.Path.CURVE4 * np.ones((len(vertices) * 3 - 2, ), mpath.Path.code_type)
# result[0] = vertices[0]
# codes[0] = mpath.Path.MOVETO
# kappa = 4.0 * ((np.sqrt(2.0) - 1.0) / 3.0)
# tkappa = np.arctan(kappa)
# hyp_kappa = np.sqrt(kappa*kappa + 1.0)
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# r0 = vertices[0:-1, 1]
# r1 = vertices[1: , 1]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# td_scaled = td / (np.pi * 0.5)
# rd = r1 - r0
# r0kappa = r0 * kappa * td_scaled
# r1kappa = r1 * kappa * td_scaled
# ravg_kappa = ((r1 + r0) / 2.0) * kappa * td_scaled
# result[1::3, 0] = t0 + (tkappa * td_scaled)
# result[1::3, 1] = r0*hyp_kappa
# # result[1::3, 1] = r0 / np.cos(tkappa * td_scaled) # np.sqrt(r0*r0 + ravg_kappa*ravg_kappa)
# result[2::3, 0] = t1 - (tkappa * td_scaled)
# result[2::3, 1] = r1*hyp_kappa
# # result[2::3, 1] = r1 / np.cos(tkappa * td_scaled) # np.sqrt(r1*r1 + ravg_kappa*ravg_kappa)
# result[3::3, 0] = t1
# result[3::3, 1] = r1
# print vertices[:6], result[:6], t0[:6], t1[:6], td[:6], td_scaled[:6], tkappa
# result = self.transform(result)
# return mpath.Path(result, codes)
# transform_path_non_affine = transform_path
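# Editor's sketch (not part of the original module): with matplotlib's
# standard projection machinery, the class defined above would typically be
# registered and used roughly as follows:
#     from matplotlib.projections import register_projection
#     register_projection(PolarAxes)
#     ax = fig.add_subplot(111, projection='polar')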
|
cire
|
Cross-iteration redundancies elimination.
Parameters
----------
cluster : Cluster
Input Cluster, subject of the optimization pass.
mode : str
The transformation mode. Accepted: ['invariants', 'sops'].
* 'invariants' is for sub-expressions that are invariant w.r.t. one or
more Dimensions.
    * 'sops' stands for sums-of-products; that is, redundancies are searched
      for across all expressions in sum-of-product form.
sregistry : SymbolRegistry
The symbol registry, to create unique temporary names.
options : dict
The optimization options.
    Accepted: ['min-storage', 'cire-mincost', 'cire-maxpar', 'cire-rotate',
    'cire-maxalias'].
    * 'cire-mincost': a mapping from transformation mode to the minimum
      estimated cost an aliasing expression must have to be captured;
      cheaper candidates are discarded.
* 'min-storage': if True, the pass will try to minimize the amount of
storage introduced for the tensor temporaries. This might also reduce
the operation count. On the other hand, this might affect fusion and
therefore data locality. Defaults to False (legacy).
* 'cire-maxpar': if True, privilege parallelism over working set size,
that is the pass will try to create as many parallel loops as possible,
even though this will require more space (Dimensions) for the temporaries.
Defaults to False.
* 'cire-rotate': if True, the pass will use modulo indexing for the
outermost Dimension iterated over by the temporaries. This will sacrifice
a parallel loop for a reduced working set size. Defaults to False (legacy).
* 'cire-maxalias': if True, capture the largest redundancies. This will
minimize the flop count while maximizing the number of tensor temporaries,
thus increasing the working set size.
platform : Platform
The underlying platform. Used to optimize the shape of the introduced
tensor symbols.
Examples
--------
1) 'invariants'. Here's an expensive expression invariant w.r.t. `t`
t0 = (cos(a[x,y,z])*sin(b[x,y,z]))*c[t,x,y,z]
which after CIRE becomes
t1[x,y,z] = cos(a[x,y,z])*sin(b[x,y,z])
t0 = t1[x,y,z]*c[t,x,y,z]
2) 'sops'. Below we see two expressions in sum-of-product form (in this
case, the sum degenerates to a single product).
t0 = 2.0*a[x,y,z]*b[x,y,z]
t1 = 3.0*a[x,y,z+1]*b[x,y,z+1]
CIRE detects that these two expressions are actually redundant and rewrites
them as:
t2[x,y,z] = a[x,y,z]*b[x,y,z]
t0 = 2.0*t2[x,y,z]
t1 = 3.0*t2[x,y,z+1]
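    As a rough invocation sketch (editor's note: `cluster`, `sregistry`,
    `options` and `platform` are assumed to be supplied by the enclosing
    Devito compilation pipeline, so this is illustrative only):
        processed = cire(cluster, 'invariants', sregistry, options, platform)
    The pass returns a list of Clusters: the rewritten input Cluster plus any
    new Clusters computing the tensor temporaries.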
|
from collections import OrderedDict, defaultdict, namedtuple
from functools import partial
from itertools import groupby
from cached_property import cached_property
import numpy as np
from devito.ir import (SEQUENTIAL, PARALLEL, PARALLEL_IF_PVT, ROUNDABLE, DataSpace,
Forward, IterationInstance, IterationSpace, Interval,
IntervalGroup, LabeledVector, Context, detect_accesses,
build_intervals, normalize_properties)
from devito.passes.clusters.utils import timed_pass
from devito.symbolics import (Uxmapper, compare_ops, estimate_cost, q_constant,
q_leaf, retrieve_indexed, search, uxreplace)
from devito.tools import as_tuple, flatten, split
from devito.types import (Array, TempFunction, Eq, Symbol, ModuloDimension,
CustomDimension, IncrDimension)
__all__ = ['cire']
# MASKED: cire function (lines 22-109)
def _cire(cluster, context, space, sregistry, options, platform):
# Construct the space of variants
variants = [modes[mode](sregistry, options).make_schedule(cluster, context)
for mode in space]
if not any(i.schedule for i in variants):
return [cluster]
# Pick the variant with the highest score, that is the variant with the best
# trade-off between operation count reduction and working set size increase
schedule, exprs = pick_best(variants)
# Schedule -> [Clusters]
schedule = optimize_schedule(cluster, schedule, platform, sregistry, options)
clusters, subs = lower_schedule(cluster, schedule, sregistry, options)
clusters.append(rebuild(cluster, exprs, subs, schedule))
return clusters
class Cire(object):
"""
Base class for CIRE transformers.
"""
optname = None
mode = None
def __init__(self, sregistry, options):
self.sregistry = sregistry
self._opt_minstorage = options['min-storage']
self._opt_mincost = options['cire-mincost'][self.optname]
self._opt_maxpar = options['cire-maxpar']
self._opt_maxalias = options['cire-maxalias']
def make_schedule(self, cluster, context):
# Capture aliases within `exprs`
aliases = AliasMapper()
score = 0
exprs = cluster.exprs
ispace = cluster.ispace
for n in range(self._nrepeats(cluster)):
# Extract potentially aliasing expressions
mapper = self._extract(exprs, context, n)
# Search aliasing expressions
found = collect(mapper.extracted, ispace, self._opt_minstorage)
# Choose the aliasing expressions with a good flops/memory trade-off
exprs, chosen, pscore = choose(found, exprs, mapper, self._selector)
aliases.update(chosen)
score += pscore
# AliasMapper -> Schedule
schedule = lower_aliases(cluster, aliases, self._in_writeto, self._opt_maxpar)
        # The actual score is a 2-tuple <flop-reduction-score, working-set-score>
score = (score, len(aliases))
return SpacePoint(schedule, exprs, score)
def _make_symbol(self):
return Symbol(name=self.sregistry.make_name('dummy'))
def _nrepeats(self, cluster):
raise NotImplementedError
def _extract(self, exprs, context, n):
raise NotImplementedError
def _in_writeto(self, dim, cluster):
raise NotImplementedError
def _selector(self, e, naliases):
raise NotImplementedError
class CireInvariants(Cire):
optname = 'invariants'
def _nrepeats(self, cluster):
return 1
def _rule(self, e):
return (e.is_Function or
(e.is_Pow and e.exp.is_Number and e.exp < 1))
def _extract(self, exprs, context, n):
mapper = Uxmapper()
for prefix, clusters in context.items():
if not prefix:
continue
exclude = set().union(*[c.scope.writes for c in clusters])
exclude.add(prefix[-1].dim)
for e in exprs:
for i in search(e, self._rule, 'all', 'bfs_first_hit'):
if {a.function for a in i.free_symbols} & exclude:
continue
mapper.add(i, self._make_symbol)
return mapper
def _in_writeto(self, dim, cluster):
return PARALLEL in cluster.properties[dim]
def _selector(self, e, naliases):
if all(i.function.is_Symbol for i in e.free_symbols):
# E.g., `dt**(-2)`
mincost = self._opt_mincost['scalar']
else:
mincost = self._opt_mincost['tensor']
return estimate_cost(e, True)*naliases // mincost
class CireInvariantsBasic(CireInvariants):
mode = 'inv-basic'
class CireInvariantsCompound(CireInvariants):
mode = 'inv-compound'
def _extract(self, exprs, context, n):
extracted = super()._extract(exprs, context, n).extracted
rule = lambda e: any(a in extracted for a in e.args)
mapper = Uxmapper()
for e in exprs:
for i in search(e, rule, 'all', 'dfs'):
if not i.is_commutative:
continue
key = lambda a: a in extracted
terms, others = split(i.args, key)
mapper.add(i, self._make_symbol, terms)
return mapper
class CireSOPS(Cire):
optname = 'sops'
mode = 'sops'
def _nrepeats(self, cluster):
# The `nrepeats` is calculated such that we analyze all potential derivatives
# in `cluster`
return potential_max_deriv_order(cluster.exprs)
def _extract(self, exprs, context, n):
# Forbid CIRE involving Dimension-independent dependencies, e.g.:
# r0 = ...
# u[x, y] = ... r0*a[x, y] ...
# NOTE: if one uses the DSL in a conventional way and sticks to the default
# compilation pipelines where CSE always happens after CIRE, then `exclude`
# will always be empty
exclude = {i.source.indexed for i in context[None].scope.d_flow.independent()}
mapper = Uxmapper()
for e in exprs:
for i in search_potential_deriv(e, n):
if i.free_symbols & exclude:
continue
key = lambda a: a.is_Add
terms, others = split(i.args, key)
if self._opt_maxalias:
# Treat `e` as an FD expression and pull out the derivative
# coefficient from `i`
# Note: typically derivative coefficients are numbers, but
# sometimes they could be provided in symbolic form through an
# arbitrary Function. In the latter case, we rely on the
# heuristic that such Functions typically do not span the whole
# grid, but rather a single Grid dimension (e.g., `c[z, n]` for a
# stencil of diameter `n` along `z`)
if e.grid is not None and terms:
key = partial(maybe_coeff_key, e.grid)
others, more_terms = split(others, key)
terms += more_terms
mapper.add(i, self._make_symbol, terms)
return mapper
def _in_writeto(self, dim, cluster):
return self._opt_maxpar and PARALLEL in cluster.properties[dim]
def _selector(self, e, naliases):
if naliases <= 1:
return 0
else:
return estimate_cost(e, True)*naliases // self._opt_mincost
modes = {
CireInvariantsBasic.mode: CireInvariantsBasic,
CireInvariantsCompound.mode: CireInvariantsCompound,
CireSOPS.mode: CireSOPS
}
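# A minimal usage sketch of the registry above (hypothetical `sregistry`,
# `options`, `cluster` and `context` objects, as normally constructed by
# `_cire`):
#
#     transformer = modes['inv-basic'](sregistry, options)
#     schedule, exprs, score = transformer.make_schedule(cluster, context)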
def collect(extracted, ispace, min_storage):
"""
Find groups of aliasing expressions.
We shall introduce the following (loose) terminology:
* A ``terminal`` is the leaf of a mathematical operation. Terminals
can be numbers (n), literals (l), or Indexeds (I).
* ``R`` is the relaxation operator := ``R(n) = n``, ``R(l) = l``,
``R(I) = J``, where ``J`` has the same base as ``I`` but with all
offsets stripped away. For example, ``R(a[i+2,j-1]) = a[i,j]``.
* A ``relaxed expression`` is an expression in which all of the
terminals are relaxed.
Now we define the concept of aliasing. We say that an expression A
aliases an expression B if:
* ``R(A) == R(B)``
* all pairwise Indexeds in A and B access memory locations at a
fixed constant distance along each Dimension.
For example, consider the following expressions:
* a[i+1] + b[i+1]
* a[i+1] + b[j+1]
* a[i] + c[i]
* a[i+2] - b[i+2]
* a[i+2] + b[i]
* a[i-1] + b[i-1]
Out of the expressions above, the following alias to `a[i] + b[i]`:
* a[i+1] + b[i+1] : same operands and operations, distance along i: 1
* a[i-1] + b[i-1] : same operands and operations, distance along i: -1
Whereas the following do not:
* a[i+1] + b[j+1] : because at least one index differs
* a[i] + c[i] : because at least one of the operands differs
* a[i+2] - b[i+2] : because at least one operation differs
* a[i+2] + b[i] : because the distances along ``i`` differ (+2 and +0)
"""
# Find the potential aliases
found = []
for expr in extracted:
assert not expr.is_Equality
indexeds = retrieve_indexed(expr)
bases = []
offsets = []
for i in indexeds:
ii = IterationInstance(i)
if ii.is_irregular:
break
base = []
offset = []
for e, ai in zip(ii, ii.aindices):
if q_constant(e):
base.append(e)
else:
base.append(ai)
offset.append((ai, e - ai))
bases.append(tuple(base))
offsets.append(LabeledVector(offset))
if not indexeds or len(bases) == len(indexeds):
found.append(Candidate(expr, ispace, indexeds, bases, offsets))
# Create groups of aliasing expressions
mapper = OrderedDict()
unseen = list(found)
while unseen:
c = unseen.pop(0)
group = [c]
for u in list(unseen):
# Is the arithmetic structure of `c` and `u` equivalent ?
if not compare_ops(c.expr, u.expr):
continue
# Is `c` translated w.r.t. `u` ?
if not c.translated(u):
continue
group.append(u)
unseen.remove(u)
group = Group(group)
if min_storage:
k = group.dimensions_translated
else:
k = group.dimensions
mapper.setdefault(k, []).append(group)
aliases = AliasMapper()
queue = list(mapper.values())
while queue:
groups = queue.pop(0)
while groups:
# For each Dimension, determine the Minimum Intervals (MI) spanning
# all of the Groups' diameters
# Example: x's largest_diameter=2 => [x[-2,0], x[-1,1], x[0,2]]
# Note: Groups that cannot evaluate their diameter are dropped
mapper = defaultdict(int)
for g in list(groups):
try:
mapper.update({d: max(mapper[d], v) for d, v in g.diameter.items()})
except ValueError:
groups.remove(g)
intervalss = {d: make_rotations_table(d, v) for d, v in mapper.items()}
# For each Group, find a rotation that is compatible with a given MI
mapper = {}
for d, intervals in intervalss.items():
# Not all groups may access all dimensions
# Example: `d=t` and groups=[Group(...[t, x]...), Group(...[time, x]...)]
impacted = [g for g in groups if d in g.dimensions]
for interval in list(intervals):
found = {g: g.find_rotation_distance(d, interval) for g in impacted}
if all(distance is not None for distance in found.values()):
# `interval` is OK !
mapper[interval] = found
break
if len(mapper) == len(intervalss):
break
# Try again with fewer groups
# Heuristic: first try retaining the larger ones
smallest = len(min(groups, key=len))
fallback = groups
groups, remainder = split(groups, lambda g: len(g) > smallest)
if groups:
queue.append(remainder)
elif len(remainder) > 1:
# No luck with the heuristic, e.g. there are two groups
# and both have the same `len`
queue.append(fallback[1:])
groups = [fallback.pop(0)]
else:
break
for g in groups:
c = g.pivot
distances = defaultdict(int, [(i.dim, v.get(g)) for i, v in mapper.items()])
# Create the basis alias
offsets = [LabeledVector([(l, v[l] + distances[l]) for l in v.labels])
for v in c.offsets]
subs = {i: i.function[[l + v.fromlabel(l, 0) for l in b]]
for i, b, v in zip(c.indexeds, c.bases, offsets)}
alias = uxreplace(c.expr, subs)
# All aliased expressions
aliaseds = [extracted[i.expr] for i in g]
# Distance of each aliased expression from the basis alias
distances = []
for i in g:
distance = [o.distance(v) for o, v in zip(i.offsets, offsets)]
distance = [(d, set(v)) for d, v in LabeledVector.transpose(*distance)]
distances.append(LabeledVector([(d, v.pop()) for d, v in distance]))
aliases.add(alias, list(mapper), aliaseds, distances)
return aliases
def choose(aliases, exprs, mapper, selector):
"""
Analyze the detected aliases and, after applying a cost model to rule out
the aliases with a bad flops/memory trade-off, inject them into the original
expressions.
"""
tot = 0
retained = AliasMapper()
# Pass 1: a set of aliasing expressions is retained only if its cost
# exceeds the mode's threshold
candidates = OrderedDict()
aliaseds = []
others = []
for e, v in aliases.items():
score = selector(e, len(v.aliaseds))
if score > 0:
candidates[e] = score
aliaseds.extend(v.aliaseds)
else:
others.append(e)
# Do not waste time if unnecessary
if not candidates:
return exprs, retained, tot
# Project the candidate aliases into exprs to determine what the new
# working set would be
mapper = {k: v for k, v in mapper.items() if v.free_symbols & set(aliaseds)}
templated = [uxreplace(e, mapper) for e in exprs]
# Pass 2: a set of aliasing expressions is retained only if the tradeoff
# between operation count reduction and working set increase is favorable
owset = wset(others + templated)
for e, v in aliases.items():
try:
score = candidates[e]
except KeyError:
score = 0
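# An alias scoring exactly 1 is borderline: it is retained only if it
# accesses at least one Function that does not already appear in the
# residual working set `owset`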
if score > 1 or \
score == 1 and max(len(wset(e)), 1) > len(wset(e) & owset):
retained[e] = v
tot += score
# Do not waste time if unnecessary
if not retained:
return exprs, retained, tot
# Substitute the chosen aliasing sub-expressions
mapper = {k: v for k, v in mapper.items() if v.free_symbols & set(retained.aliaseds)}
exprs = [uxreplace(e, mapper) for e in exprs]
return exprs, retained, tot
def lower_aliases(cluster, aliases, in_writeto, maxpar):
"""
Create a Schedule from an AliasMapper.
"""
dmapper = {}
processed = []
for alias, v in aliases.items():
imapper = {**{i.dim: i for i in v.intervals},
**{i.dim.parent: i for i in v.intervals if i.dim.is_NonlinearDerived}}
intervals = []
writeto = []
sub_iterators = {}
indicess = [[] for _ in v.distances]
for i in cluster.ispace.intervals:
try:
interval = imapper[i.dim]
except KeyError:
# E.g., `x0_blk0` or (`a[y_m+1]` => `y not in imapper`)
intervals.append(i)
continue
assert i.stamp >= interval.stamp
if not (writeto or interval != interval.zero() or in_writeto(i.dim, cluster)):
# The alias doesn't require a temporary Dimension along i.dim
intervals.append(i)
continue
assert not i.dim.is_NonlinearDerived
# `i.dim` is necessarily part of the write-to region, so
# we have to adjust the Interval's stamp. For example, consider
# `i=x[0,0]<1>` and `interval=x[-4,4]<0>`; here we need to
# use `<1>` as stamp, which is what appears in `cluster`
interval = interval.lift(i.stamp)
# We further bump the interval stamp if we were requested to trade
# fusion for more collapse-parallelism
interval = interval.lift(interval.stamp + int(maxpar))
writeto.append(interval)
intervals.append(interval)
if i.dim.is_Incr:
# Suitable IncrDimensions must be used to avoid OOB accesses.
# E.g., r[xs][ys][z] => both `xs` and `ys` must be initialized such
# that all accesses are within bounds. This requires traversing the
# hierarchy of IncrDimensions to set `xs` (`ys`) in a way that
# consecutive blocks access consecutive regions in `r` (e.g.,
# `xs=x0_blk1-x0_blk0` with `blocklevels=2`; `xs=0` with
# `blocklevels=1`, that is it degenerates in this case)
try:
d = dmapper[i.dim]
except KeyError:
dd = i.dim.parent
assert dd.is_Incr
if dd.parent.is_Incr:
# An IncrDimension in between IncrDimensions
m = i.dim.symbolic_min - i.dim.parent.symbolic_min
else:
m = 0
d = dmapper[i.dim] = IncrDimension("%ss" % i.dim.name, i.dim, m,
dd.symbolic_size, 1, dd.step)
sub_iterators[i.dim] = d
else:
d = i.dim
# Given the iteration `interval`, lower distances to indices
for distance, indices in zip(v.distances, indicess):
indices.append(d - interval.lower + distance[interval.dim])
# The alias write-to space
writeto = IterationSpace(IntervalGroup(writeto), sub_iterators)
# The alias iteration space
intervals = IntervalGroup(intervals, cluster.ispace.relations)
ispace = IterationSpace(intervals, cluster.sub_iterators, cluster.directions)
ispace = ispace.augment(sub_iterators)
processed.append(ScheduledAlias(alias, writeto, ispace, v.aliaseds, indicess))
# The [ScheduledAliases] must be ordered so as to reuse as many of the
# `cluster`'s IterationIntervals as possible in order to honor the
# write-to region. Another fundamental reason for ordering is to ensure
# deterministic code generation
processed = sorted(processed, key=lambda i: cit(cluster.ispace, i.ispace))
return Schedule(*processed, dmapper=dmapper)
def optimize_schedule(cluster, schedule, platform, sregistry, options):
"""
Rewrite the schedule for performance optimization.
"""
if options['cire-rotate']:
schedule = _optimize_schedule_rotations(schedule, sregistry)
schedule = _optimize_schedule_padding(cluster, schedule, platform)
return schedule
def _optimize_schedule_rotations(schedule, sregistry):
"""
Transform the schedule such that the tensor temporaries "rotate" along
the outermost Dimension. This trades a parallel Dimension for a smaller
working set size.
"""
# The rotations Dimension is the outermost
ridx = 0
rmapper = defaultdict(list)
processed = []
for k, group in groupby(schedule, key=lambda i: i.writeto):
g = list(group)
candidate = k[ridx]
d = candidate.dim
try:
ds = schedule.dmapper[d]
except KeyError:
# Can't do anything if `d` isn't an IncrDimension over a block
processed.extend(g)
continue
n = candidate.min_size
assert n > 0
iis = candidate.lower
iib = candidate.upper
ii = ModuloDimension('%sii' % d, ds, iis, incr=iib)
cd = CustomDimension(name='%s%s' % (d, d), symbolic_min=ii, symbolic_max=iib,
symbolic_size=n)
dsi = ModuloDimension('%si' % ds, cd, cd + ds - iis, n)
mapper = OrderedDict()
for i in g:
# Update `indicess` to use `xs0`, `xs1`, ...
mds = []
for indices in i.indicess:
v = indices[ridx]
try:
md = mapper[v]
except KeyError:
name = sregistry.make_name(prefix='%sr' % d.name)
md = mapper.setdefault(v, ModuloDimension(name, ds, v, n))
mds.append(md)
indicess = [indices[:ridx] + [md] + indices[ridx + 1:]
for md, indices in zip(mds, i.indicess)]
# Update `writeto` by switching `d` to `dsi`
intervals = k.intervals.switch(d, dsi).zero(dsi)
sub_iterators = dict(k.sub_iterators)
sub_iterators[d] = dsi
writeto = IterationSpace(intervals, sub_iterators)
# Transform `alias` by adding `i`
alias = i.alias.xreplace({d: d + cd})
# Extend `ispace` to iterate over rotations
d1 = writeto[ridx+1].dim # Note: we're by construction in-bounds here
intervals = IntervalGroup(Interval(cd, 0, 0), relations={(d, cd, d1)})
rispace = IterationSpace(intervals, {cd: dsi}, {cd: Forward})
aispace = i.ispace.zero(d)
aispace = aispace.augment({d: mds + [ii]})
ispace = IterationSpace.union(rispace, aispace)
processed.append(ScheduledAlias(alias, writeto, ispace, i.aliaseds, indicess))
# Update the rotations mapper
rmapper[d].extend(list(mapper.values()))
return Schedule(*processed, dmapper=schedule.dmapper, rmapper=rmapper)
def _optimize_schedule_padding(cluster, schedule, platform):
"""
Round up the innermost IterationInterval of the tensor temporaries' IterationSpace
to a multiple of the SIMD vector length. This is not always possible though (it
depends on how much halo is safely accessible in all read Functions).
"""
processed = []
for i in schedule:
try:
it = i.ispace.itintervals[-1]
if ROUNDABLE in cluster.properties[it.dim]:
vl = platform.simd_items_per_reg(cluster.dtype)
ispace = i.ispace.add(Interval(it.dim, 0, it.interval.size % vl))
else:
ispace = i.ispace
processed.append(ScheduledAlias(i.alias, i.writeto, ispace, i.aliaseds,
i.indicess))
except (TypeError, KeyError):
processed.append(i)
return Schedule(*processed, dmapper=schedule.dmapper, rmapper=schedule.rmapper)
def lower_schedule(cluster, schedule, sregistry, options):
"""
Turn a Schedule into a sequence of Clusters.
"""
ftemps = options['cire-ftemps']
if ftemps:
make = TempFunction
else:
# Typical case -- the user does *not* "see" the CIRE-created temporaries
make = Array
clusters = []
subs = {}
for alias, writeto, ispace, aliaseds, indicess in schedule:
# Basic info to create the temporary that will hold the alias
name = sregistry.make_name()
dtype = cluster.dtype
if writeto:
# The Dimensions defining the shape of Array
# Note: with SubDimensions, we may have the following situation:
#
# for zi = z_m + zi_ltkn; zi <= z_M - zi_rtkn; ...
# r[zi] = ...
#
# Instead of `r[zi - z_m - zi_ltkn]` we have just `r[zi]`, so we'll need
# as much room as in `zi`'s parent to avoid going OOB
# Aside from producing ugly code, the reason we do not simply shift the
# indices is that it would prevent future passes from transforming the loop bounds
# (e.g., MPI's comp/comm overlap does that)
dimensions = [d.parent if d.is_Sub else d for d in writeto.itdimensions]
# The halo must be set according to the size of writeto space
halo = [(abs(i.lower), abs(i.upper)) for i in writeto]
# The indices used to write into the Array
indices = []
for i in writeto:
try:
# E.g., `xs`
sub_iterators = writeto.sub_iterators[i.dim]
assert len(sub_iterators) == 1
indices.append(sub_iterators[0])
except KeyError:
# E.g., `z` -- a non-shifted Dimension
indices.append(i.dim - i.lower)
obj = make(name=name, dimensions=dimensions, halo=halo, dtype=dtype)
expression = Eq(obj[indices], alias)
callback = lambda idx: obj[idx]
else:
# Degenerate case: scalar expression
assert writeto.size == 0
obj = Symbol(name=name, dtype=dtype)
expression = Eq(obj, alias)
callback = lambda idx: obj
# Create the substitution rules for the aliasing expressions
subs.update({aliased: callback(indices)
for aliased, indices in zip(aliaseds, indicess)})
# Construct the `alias` DataSpace
accesses = detect_accesses(expression)
parts = {k: IntervalGroup(build_intervals(v)).add(ispace.intervals).relaxed
for k, v in accesses.items() if k}
dspace = DataSpace(cluster.dspace.intervals, parts)
# Drop or weaken parallelism if necessary
properties = dict(cluster.properties)
for d, v in cluster.properties.items():
if any(i.is_Modulo for i in ispace.sub_iterators[d]):
properties[d] = normalize_properties(v, {SEQUENTIAL})
elif d not in writeto.dimensions:
properties[d] = normalize_properties(v, {PARALLEL_IF_PVT})
# Finally, build the `alias` Cluster
clusters.append(cluster.rebuild(exprs=expression, ispace=ispace,
dspace=dspace, properties=properties))
return clusters, subs
def pick_best(variants):
"""
Use the variant score and heuristics to return the variant with the best
trade-off between operation count reduction and working set increase.
"""
best = variants.pop(0)
for i in variants:
best_flop_score, best_ws_score = best.score
if best_flop_score == 0:
best = i
continue
i_flop_score, i_ws_score = i.score
# The current heuristic is fairly basic: the variant with the smaller working
# set size increase wins, unless there's a massive reduction in operation
# count in the other one
delta = i_ws_score - best_ws_score
if (delta > 0 and i_flop_score / best_flop_score > 100) or \
(delta == 0 and i_flop_score > best_flop_score) or \
(delta < 0 and best_flop_score / i_flop_score <= 100):
best = i
schedule, exprs, _ = best
return schedule, exprs
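# A worked example of the heuristic above, with hypothetical scores: given
# variants scored (flops=50, ws=3) and (flops=6000, ws=4), the latter wins
# despite the larger working set, since 6000/50 > 100; with (flops=50, ws=3)
# and (flops=60, ws=4) the former wins, since 60/50 <= 100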
def rebuild(cluster, exprs, subs, schedule):
"""
Plug the optimized aliases into the input Cluster. This leads to creating
a new Cluster with suitable IterationSpace and DataSpace.
"""
exprs = [uxreplace(e, subs) for e in exprs]
ispace = cluster.ispace.augment(schedule.dmapper)
ispace = ispace.augment(schedule.rmapper)
accesses = detect_accesses(exprs)
parts = {k: IntervalGroup(build_intervals(v)).relaxed
for k, v in accesses.items() if k}
dspace = DataSpace(cluster.dspace.intervals, parts)
return cluster.rebuild(exprs=exprs, ispace=ispace, dspace=dspace)
# Utilities
class Candidate(object):
def __init__(self, expr, ispace, indexeds, bases, offsets):
self.expr = expr
self.shifts = ispace.intervals
self.indexeds = indexeds
self.bases = bases
self.offsets = offsets
def __repr__(self):
return "Candidate(expr=%s)" % self.expr
def translated(self, other):
"""
True if ``self`` is translated w.r.t. ``other``, False otherwise.
Examples
--------
Two candidates are translated if their bases are the same and
their offsets are pairwise translated.
c := A[i,j] op A[i,j+1] -> Toffsets = {i: [0,0], j: [0,1]}
u := A[i+1,j] op A[i+1,j+1] -> Toffsets = {i: [1,1], j: [0,1]}
Then `c` is translated w.r.t. `u` with distance `{i: 1, j: 0}`
"""
if len(self.Toffsets) != len(other.Toffsets):
return False
if len(self.bases) != len(other.bases):
return False
# Check the bases
if any(b0 != b1 for b0, b1 in zip(self.bases, other.bases)):
return False
# Check the offsets
for (d0, o0), (d1, o1) in zip(self.Toffsets, other.Toffsets):
if d0 is not d1:
return False
distance = set(o0 - o1)
if len(distance) != 1:
return False
return True
@cached_property
def Toffsets(self):
return LabeledVector.transpose(*self.offsets)
@cached_property
def dimensions(self):
return frozenset(i for i, _ in self.Toffsets)
class Group(tuple):
"""
A collection of aliasing expressions.
"""
def __repr__(self):
return "Group(%s)" % ", ".join([str(i) for i in self])
def find_rotation_distance(self, d, interval):
"""
The distance from the Group pivot of a rotation along Dimension ``d`` that
can safely iterate over the ``interval``.
"""
assert d is interval.dim
for rotation, distance in self._pivot_legal_rotations[d]:
# Does `rotation` cover the `interval` ?
if rotation.union(interval) != rotation:
continue
# Infer the `rotation`'s min_intervals from the pivot's
min_interval = self._pivot_min_intervals[d].translate(-distance)
# Does the `interval` actually cover the `rotation`'s `min_interval`?
if interval.union(min_interval) == interval:
return distance
return None
@cached_property
def Toffsets(self):
return [LabeledVector.transpose(*i) for i in zip(*[i.offsets for i in self])]
@cached_property
def diameter(self):
"""
The size of the iteration space required to evaluate all aliasing expressions
in this Group, along each Dimension.
"""
ret = defaultdict(int)
for i in self.Toffsets:
for d, v in i:
try:
distance = int(max(v) - min(v))
except TypeError:
# An entry in `v` has symbolic components, e.g. `x_m + 2`
if len(set(v)) == 1:
continue
else:
raise ValueError
ret[d] = max(ret[d], distance)
return ret
@property
def pivot(self):
"""
A deterministically chosen Candidate for this Group.
"""
return self[0]
@property
def dimensions(self):
return self.pivot.dimensions
@property
def dimensions_translated(self):
return frozenset(d for d, v in self.diameter.items() if v > 0)
@cached_property
def _pivot_legal_rotations(self):
"""
All legal rotations along each Dimension for the Group pivot.
"""
ret = {}
for d, (maxd, mini) in self._pivot_legal_shifts.items():
# Rotation size = mini (min-increment) - maxd (max-decrement)
v = mini - maxd
# Build the table of all possible rotations
m = make_rotations_table(d, v)
distances = []
for rotation in m:
# Distance of `rotation` from the pivot
distance = maxd - rotation.lower
assert distance == mini - rotation.upper
distances.append(distance)
ret[d] = list(zip(m, distances))
return ret
@cached_property
def _pivot_min_intervals(self):
"""
The minimum Interval along each Dimension such that by evaluating the
pivot, all Candidates are evaluated too.
"""
c = self.pivot
ret = defaultdict(lambda: [np.inf, -np.inf])
for i in self:
distance = [o.distance(v) for o, v in zip(i.offsets, c.offsets)]
distance = [(d, set(v)) for d, v in LabeledVector.transpose(*distance)]
for d, v in distance:
value = v.pop()
ret[d][0] = min(ret[d][0], value)
ret[d][1] = max(ret[d][1], value)
ret = {d: Interval(d, m, M) for d, (m, M) in ret.items()}
return ret
@cached_property
def _pivot_legal_shifts(self):
"""
The max decrement and min increment along each Dimension such that the
Group pivot does not go OOB.
"""
c = self.pivot
ret = defaultdict(lambda: (-np.inf, np.inf))
for i, ofs in zip(c.indexeds, c.offsets):
f = i.function
for l in ofs.labels:
# `f`'s cumulative halo size along `l`
hsize = sum(f._size_halo[l])
# Any `ofs`'s shift due to non-[0,0] iteration space
lower, upper = c.shifts[l].offsets
try:
# Assume `ofs[d]` is a number (typical case)
maxd = min(0, max(ret[l][0], -ofs[l] - lower))
mini = max(0, min(ret[l][1], hsize - ofs[l] - upper))
ret[l] = (maxd, mini)
except TypeError:
# E.g., `ofs[d] = x_m - x + 5`
ret[l] = (0, 0)
return ret
AliasedGroup = namedtuple('AliasedGroup', 'intervals aliaseds distances')
ScheduledAlias = namedtuple('ScheduledAlias', 'alias writeto ispace aliaseds indicess')
ScheduledAlias.__new__.__defaults__ = (None,) * len(ScheduledAlias._fields)
SpacePoint = namedtuple('SpacePoint', 'schedule exprs score')
class Schedule(tuple):
def __new__(cls, *items, dmapper=None, rmapper=None):
obj = super(Schedule, cls).__new__(cls, items)
obj.dmapper = dmapper or {}
obj.rmapper = rmapper or {}
return obj
class AliasMapper(OrderedDict):
def add(self, alias, intervals, aliaseds, distances):
assert len(aliaseds) == len(distances)
self[alias] = AliasedGroup(intervals, aliaseds, distances)
def update(self, aliases):
for k, v in aliases.items():
try:
v0 = self[k]
if v0.intervals != v.intervals:
raise ValueError
v0.aliaseds.extend(v.aliaseds)
v0.distances.extend(v.distances)
except KeyError:
self[k] = v
@property
def aliaseds(self):
return flatten(i.aliaseds for i in self.values())
def make_rotations_table(d, v):
"""
All possible rotations of `range(v+1)`.
"""
m = np.array([[j-i if j > i else 0 for j in range(v+1)] for i in range(v+1)])
m = (m - m.T)[::-1, :]
# Shift the table so that the middle rotation is at the top
m = np.roll(m, int(-np.floor(v/2)), axis=0)
# Turn into a more compact representation as a list of Intervals
m = [Interval(d, min(i), max(i)) for i in m]
return m
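# A worked example, assuming a hypothetical Dimension `x` and v=2: the rows
# of `m` encode the intervals x[-2,0], x[-1,1] and x[0,2]; the roll then
# puts the middle rotation first, so the result is (schematically)
# [Interval(x, -1, 1), Interval(x, 0, 2), Interval(x, -2, 0)]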
def cit(ispace0, ispace1):
"""
The Common IterationIntervals of two IterationSpaces.
"""
found = []
for it0, it1 in zip(ispace0.itintervals, ispace1.itintervals):
if it0 == it1:
found.append(it0)
else:
break
return tuple(found)
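# A schematic example: if `ispace0.itintervals` is (t, x, y) and
# `ispace1.itintervals` is (t, x, z), the longest common prefix -- and
# hence the result -- is (t, x)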
def maybe_coeff_key(grid, expr):
"""
True if `expr` could be the coefficient of an FD derivative, False otherwise.
"""
if expr.is_Number:
return True
indexeds = [i for i in expr.free_symbols if i.is_Indexed]
return any(not set(grid.dimensions) <= set(i.function.dimensions) for i in indexeds)
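# E.g., with grid.dimensions = (x, y, z): `2.0` -> True (a Number);
# `c[z, n]` -> True, since {x, y, z} is not a subset of `c`'s dimensions
# {z, n}; `a[x, y, z]` -> False, as `a` spans the whole grid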
def wset(exprs):
"""
Extract the working set out of a set of equations.
"""
return {i.function for i in flatten([e.free_symbols for e in as_tuple(exprs)])
if i.function.is_AbstractFunction}
def potential_max_deriv_order(exprs):
"""
The maximum FD derivative order in a list of expressions.
"""
# NOTE: we might propagate the Derivative(...) information down from the
# symbolic language, but users may do crazy things and write their own custom
# expansions "by hand" (i.e., not resorting to Derivative(...)), hence instead
# of looking for Derivative(...) we use the following heuristic:
# add(mul, mul, ...) -> stems from first order derivative
# add(mul(add(mul, mul, ...), ...), ...) -> stems from second order derivative
# ...
nadds = lambda e: (int(e.is_Add) +
max([nadds(a) for a in e.args], default=0) if not q_leaf(e) else 0)
return max([nadds(e) for e in exprs], default=0)
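# A schematic example of the `nadds` heuristic: `a*b + c*d` has one level
# of Add nesting, hence it scores 1 (first-order derivative), whereas
# `(a + b)*c + (d + e)*f` nests an Add inside the outer Add's Muls, hence
# it scores 2 (second-order derivative)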
def search_potential_deriv(expr, n, c=0):
"""
Retrieve the expressions at depth `n` that potentially stem from FD derivatives.
"""
assert n >= c >= 0
if q_leaf(expr) or expr.is_Pow:
return []
elif expr.is_Mul:
if c == n:
return [expr]
else:
return flatten([search_potential_deriv(a, n, c+1) for a in expr.args])
else:
return flatten([search_potential_deriv(a, n, c) for a in expr.args])
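# A schematic example: with `expr = w*(2.0*a + 3.0*b)`, depth n=0 returns
# the whole product `[w*(2.0*a + 3.0*b)]`, while n=1 descends through the
# outer Mul and the Add, returning the inner products `[2.0*a, 3.0*b]`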
|
@timed_pass(name='cire')
def cire(clusters, mode, sregistry, options, platform):
"""
Cross-iteration redundancies elimination.
Parameters
----------
clusters : list of Cluster
Input Clusters, subject of the optimization pass.
mode : str
The transformation mode. Accepted: ['invariants', 'sops'].
* 'invariants' is for sub-expressions that are invariant w.r.t. one or
more Dimensions.
* 'sops' stands for sums-of-products, that is, redundancies are searched
across all expressions in sum-of-product form.
sregistry : SymbolRegistry
The symbol registry, to create unique temporary names.
options : dict
The optimization options.
Accepted: ['min-storage', 'cire-mincost', 'cire-maxpar', 'cire-rotate',
'cire-maxalias', 'cire-ftemps'].
* 'min-storage': if True, the pass will try to minimize the amount of
storage introduced for the tensor temporaries. This might also reduce
the operation count. On the other hand, this might affect fusion and
therefore data locality. Defaults to False (legacy).
* 'cire-maxpar': if True, privilege parallelism over working set size,
that is the pass will try to create as many parallel loops as possible,
even though this will require more space (Dimensions) for the temporaries.
Defaults to False.
* 'cire-rotate': if True, the pass will use modulo indexing for the
outermost Dimension iterated over by the temporaries. This will sacrifice
a parallel loop for a reduced working set size. Defaults to False (legacy).
* 'cire-maxalias': if True, capture the largest redundancies. This will
minimize the flop count while maximizing the number of tensor temporaries,
thus increasing the working set size.
* 'cire-mincost': the cost threshold(s) that a group of aliasing expressions
must exceed to be considered worth optimizing.
* 'cire-ftemps': if True, the CIRE-created temporaries are TempFunctions,
hence visible to the user; otherwise (the typical case) plain Arrays, which
the user does not see, are used.
platform : Platform
The underlying platform. Used to optimize the shape of the introduced
tensor symbols.
Examples
--------
1) 'invariants'. Here's an expensive expression invariant w.r.t. `t`
t0 = (cos(a[x,y,z])*sin(b[x,y,z]))*c[t,x,y,z]
which after CIRE becomes
t1[x,y,z] = cos(a[x,y,z])*sin(b[x,y,z])
t0 = t1[x,y,z]*c[t,x,y,z]
2) 'sops'. Below we see two expressions in sum-of-product form (in this
case, the sum degenerates to a single product).
t0 = 2.0*a[x,y,z]*b[x,y,z]
t1 = 3.0*a[x,y,z+1]*b[x,y,z+1]
CIRE detects that these two expressions are actually redundant and rewrites
them as:
t2[x,y,z] = a[x,y,z]*b[x,y,z]
t0 = 2.0*t2[x,y,z]
t1 = 3.0*t2[x,y,z+1]
"""
if mode == 'invariants':
space = ('inv-basic', 'inv-compound')
elif mode in ('sops',):
space = (mode,)
else:
assert False, "Unknown CIRE mode `%s`" % mode
processed = []
for c in clusters:
# We don't care about sparse Clusters. Their computational cost is
# negligible and processing all of them would only increase compilation
# time and potentially make the generated code more chaotic
if not c.is_dense:
processed.append(c)
continue
# Some of the CIRE transformers need to look inside all scopes
# surrounding `c` to perform data dependencies analysis
context = Context(c).process(clusters)
# Applying CIRE may change `c` as well as creating one or more new Clusters
transformed = _cire(c, context, space, sregistry, options, platform)
processed.extend(transformed)
return processed
| 22
| 109
|
from collections import OrderedDict, defaultdict, namedtuple
from functools import partial
from itertools import groupby
from cached_property import cached_property
import numpy as np
from devito.ir import (SEQUENTIAL, PARALLEL, PARALLEL_IF_PVT, ROUNDABLE, DataSpace,
Forward, IterationInstance, IterationSpace, Interval,
IntervalGroup, LabeledVector, Context, detect_accesses,
build_intervals, normalize_properties)
from devito.passes.clusters.utils import timed_pass
from devito.symbolics import (Uxmapper, compare_ops, estimate_cost, q_constant,
q_leaf, retrieve_indexed, search, uxreplace)
from devito.tools import as_tuple, flatten, split
from devito.types import (Array, TempFunction, Eq, Symbol, ModuloDimension,
CustomDimension, IncrDimension)
__all__ = ['cire']
@timed_pass(name='cire')
def cire(clusters, mode, sregistry, options, platform):
"""
Cross-iteration redundancies elimination.
Parameters
----------
clusters : list of Cluster
Input Clusters, subject of the optimization pass.
mode : str
The transformation mode. Accepted: ['invariants', 'sops'].
* 'invariants' is for sub-expressions that are invariant w.r.t. one or
more Dimensions.
* 'sops' stands for sums-of-products, that is, redundancies are searched
across all expressions in sum-of-product form.
sregistry : SymbolRegistry
The symbol registry, to create unique temporary names.
options : dict
The optimization options.
Accepted: ['min-storage', 'cire-mincost', 'cire-maxpar', 'cire-rotate',
'cire-maxalias', 'cire-ftemps'].
* 'min-storage': if True, the pass will try to minimize the amount of
storage introduced for the tensor temporaries. This might also reduce
the operation count. On the other hand, this might affect fusion and
therefore data locality. Defaults to False (legacy).
* 'cire-maxpar': if True, privilege parallelism over working set size,
that is the pass will try to create as many parallel loops as possible,
even though this will require more space (Dimensions) for the temporaries.
Defaults to False.
* 'cire-rotate': if True, the pass will use modulo indexing for the
outermost Dimension iterated over by the temporaries. This will sacrifice
a parallel loop for a reduced working set size. Defaults to False (legacy).
* 'cire-maxalias': if True, capture the largest redundancies. This will
minimize the flop count while maximizing the number of tensor temporaries,
thus increasing the working set size.
* 'cire-mincost': the cost threshold(s) that a group of aliasing expressions
must exceed to be considered worth optimizing.
* 'cire-ftemps': if True, the CIRE-created temporaries are TempFunctions,
hence visible to the user; otherwise (the typical case) plain Arrays, which
the user does not see, are used.
platform : Platform
The underlying platform. Used to optimize the shape of the introduced
tensor symbols.
Examples
--------
1) 'invariants'. Here's an expensive expression invariant w.r.t. `t`
t0 = (cos(a[x,y,z])*sin(b[x,y,z]))*c[t,x,y,z]
which after CIRE becomes
t1[x,y,z] = cos(a[x,y,z])*sin(b[x,y,z])
t0 = t1[x,y,z]*c[t,x,y,z]
2) 'sops'. Below we see two expressions in sum-of-product form (in this
case, the sum degenerates to a single product).
t0 = 2.0*a[x,y,z]*b[x,y,z]
t1 = 3.0*a[x,y,z+1]*b[x,y,z+1]
CIRE detects that these two expressions are actually redundant and rewrites
them as:
t2[x,y,z] = a[x,y,z]*b[x,y,z]
t0 = 2.0*t2[x,y,z]
t1 = 3.0*t2[x,y,z+1]
"""
if mode == 'invariants':
space = ('inv-basic', 'inv-compound')
elif mode in ('sops',):
space = (mode,)
else:
assert False, "Unknown CIRE mode `%s`" % mode
processed = []
for c in clusters:
# We don't care about sparse Clusters. Their computational cost is
# negligible and processing all of them would only increase compilation
# time and potentially make the generated code more chaotic
if not c.is_dense:
processed.append(c)
continue
# Some of the CIRE transformers need to look inside all scopes
# surrounding `c` to perform data dependencies analysis
context = Context(c).process(clusters)
# Applying CIRE may change `c` as well as creating one or more new Clusters
transformed = _cire(c, context, space, sregistry, options, platform)
processed.extend(transformed)
return processed
def _cire(cluster, context, space, sregistry, options, platform):
# Construct the space of variants
variants = [modes[mode](sregistry, options).make_schedule(cluster, context)
for mode in space]
if not any(i.schedule for i in variants):
return [cluster]
# Pick the variant with the highest score, that is the variant with the best
# trade-off between operation count reduction and working set size increase
schedule, exprs = pick_best(variants)
# Schedule -> [Clusters]
schedule = optimize_schedule(cluster, schedule, platform, sregistry, options)
clusters, subs = lower_schedule(cluster, schedule, sregistry, options)
clusters.append(rebuild(cluster, exprs, subs, schedule))
return clusters
class Cire(object):
"""
Base class for CIRE transformers.
"""
optname = None
mode = None
def __init__(self, sregistry, options):
self.sregistry = sregistry
self._opt_minstorage = options['min-storage']
self._opt_mincost = options['cire-mincost'][self.optname]
self._opt_maxpar = options['cire-maxpar']
self._opt_maxalias = options['cire-maxalias']
def make_schedule(self, cluster, context):
# Capture aliases within `exprs`
aliases = AliasMapper()
score = 0
exprs = cluster.exprs
ispace = cluster.ispace
for n in range(self._nrepeats(cluster)):
# Extract potentially aliasing expressions
mapper = self._extract(exprs, context, n)
# Search aliasing expressions
found = collect(mapper.extracted, ispace, self._opt_minstorage)
# Choose the aliasing expressions with a good flops/memory trade-off
exprs, chosen, pscore = choose(found, exprs, mapper, self._selector)
aliases.update(chosen)
score += pscore
# AliasMapper -> Schedule
schedule = lower_aliases(cluster, aliases, self._in_writeto, self._opt_maxpar)
# The actual score is a 2-tuple <flop-reduction-score, working-set-score>
score = (score, len(aliases))
return SpacePoint(schedule, exprs, score)
def _make_symbol(self):
return Symbol(name=self.sregistry.make_name('dummy'))
def _nrepeats(self, cluster):
raise NotImplementedError
def _extract(self, exprs, context, n):
raise NotImplementedError
def _in_writeto(self, dim, cluster):
raise NotImplementedError
def _selector(self, e, naliases):
raise NotImplementedError
class CireInvariants(Cire):
optname = 'invariants'
def _nrepeats(self, cluster):
return 1
def _rule(self, e):
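# A sub-expression is a candidate invariant if it is a Function application
# (e.g., `sin(...)`, `cos(...)`) or a Pow with numeric exponent smaller
# than 1, such as `dt**(-2)` or a square root (`(...)**0.5`)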
return (e.is_Function or
(e.is_Pow and e.exp.is_Number and e.exp < 1))
def _extract(self, exprs, context, n):
mapper = Uxmapper()
for prefix, clusters in context.items():
if not prefix:
continue
exclude = set().union(*[c.scope.writes for c in clusters])
exclude.add(prefix[-1].dim)
for e in exprs:
for i in search(e, self._rule, 'all', 'bfs_first_hit'):
if {a.function for a in i.free_symbols} & exclude:
continue
mapper.add(i, self._make_symbol)
return mapper
def _in_writeto(self, dim, cluster):
return PARALLEL in cluster.properties[dim]
def _selector(self, e, naliases):
if all(i.function.is_Symbol for i in e.free_symbols):
# E.g., `dt**(-2)`
mincost = self._opt_mincost['scalar']
else:
mincost = self._opt_mincost['tensor']
return estimate_cost(e, True)*naliases // mincost
class CireInvariantsBasic(CireInvariants):
mode = 'inv-basic'
class CireInvariantsCompound(CireInvariants):
mode = 'inv-compound'
def _extract(self, exprs, context, n):
extracted = super()._extract(exprs, context, n).extracted
rule = lambda e: any(a in extracted for a in e.args)
mapper = Uxmapper()
for e in exprs:
for i in search(e, rule, 'all', 'dfs'):
if not i.is_commutative:
continue
key = lambda a: a in extracted
terms, others = split(i.args, key)
mapper.add(i, self._make_symbol, terms)
return mapper
class CireSOPS(Cire):
optname = 'sops'
mode = 'sops'
def _nrepeats(self, cluster):
# The `nrepeats` is calculated such that we analyze all potential derivatives
# in `cluster`
return potential_max_deriv_order(cluster.exprs)
def _extract(self, exprs, context, n):
# Forbid CIRE involving Dimension-independent dependencies, e.g.:
# r0 = ...
# u[x, y] = ... r0*a[x, y] ...
# NOTE: if one uses the DSL in a conventional way and sticks to the default
# compilation pipelines where CSE always happens after CIRE, then `exclude`
# will always be empty
exclude = {i.source.indexed for i in context[None].scope.d_flow.independent()}
mapper = Uxmapper()
for e in exprs:
for i in search_potential_deriv(e, n):
if i.free_symbols & exclude:
continue
key = lambda a: a.is_Add
terms, others = split(i.args, key)
if self._opt_maxalias:
# Treat `e` as an FD expression and pull out the derivative
# coefficient from `i`
# Note: typically derivative coefficients are numbers, but
# sometimes they could be provided in symbolic form through an
# arbitrary Function. In the latter case, we rely on the
# heuristic that such Functions typically do not span the whole
# grid, but rather a single Grid dimension (e.g., `c[z, n]` for a
# stencil of diameter `n` along `z`)
if e.grid is not None and terms:
key = partial(maybe_coeff_key, e.grid)
others, more_terms = split(others, key)
terms += more_terms
mapper.add(i, self._make_symbol, terms)
return mapper
def _in_writeto(self, dim, cluster):
return self._opt_maxpar and PARALLEL in cluster.properties[dim]
def _selector(self, e, naliases):
if naliases <= 1:
return 0
else:
return estimate_cost(e, True)*naliases // self._opt_mincost
modes = {
CireInvariantsBasic.mode: CireInvariantsBasic,
CireInvariantsCompound.mode: CireInvariantsCompound,
CireSOPS.mode: CireSOPS
}
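# A minimal usage sketch of the registry above (hypothetical `sregistry`,
# `options`, `cluster` and `context` objects, as normally constructed by
# `_cire`):
#
#     transformer = modes['inv-basic'](sregistry, options)
#     schedule, exprs, score = transformer.make_schedule(cluster, context)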
def collect(extracted, ispace, min_storage):
"""
Find groups of aliasing expressions.
We shall introduce the following (loose) terminology:
* A ``terminal`` is the leaf of a mathematical operation. Terminals
can be numbers (n), literals (l), or Indexeds (I).
* ``R`` is the relaxation operator := ``R(n) = n``, ``R(l) = l``,
``R(I) = J``, where ``J`` has the same base as ``I`` but with all
offsets stripped away. For example, ``R(a[i+2,j-1]) = a[i,j]``.
* A ``relaxed expression`` is an expression in which all of the
terminals are relaxed.
Now we define the concept of aliasing. We say that an expression A
aliases an expression B if:
* ``R(A) == R(B)``
* all pairwise Indexeds in A and B access memory locations at a
fixed constant distance along each Dimension.
For example, consider the following expressions:
* a[i+1] + b[i+1]
* a[i+1] + b[j+1]
* a[i] + c[i]
* a[i+2] - b[i+2]
* a[i+2] + b[i]
* a[i-1] + b[i-1]
Out of the expressions above, the following alias to `a[i] + b[i]`:
* a[i+1] + b[i+1] : same operands and operations, distance along i: 1
* a[i-1] + b[i-1] : same operands and operations, distance along i: -1
Whereas the following do not:
* a[i+1] + b[j+1] : because at least one index differs
* a[i] + c[i] : because at least one of the operands differs
* a[i+2] - b[i+2] : because at least one operation differs
* a[i+2] + b[i] : because the distances along ``i`` differ (+2 and +0)
"""
# Find the potential aliases
found = []
for expr in extracted:
assert not expr.is_Equality
indexeds = retrieve_indexed(expr)
bases = []
offsets = []
for i in indexeds:
ii = IterationInstance(i)
if ii.is_irregular:
break
base = []
offset = []
for e, ai in zip(ii, ii.aindices):
if q_constant(e):
base.append(e)
else:
base.append(ai)
offset.append((ai, e - ai))
bases.append(tuple(base))
offsets.append(LabeledVector(offset))
if not indexeds or len(bases) == len(indexeds):
found.append(Candidate(expr, ispace, indexeds, bases, offsets))
# Create groups of aliasing expressions
mapper = OrderedDict()
unseen = list(found)
while unseen:
c = unseen.pop(0)
group = [c]
for u in list(unseen):
# Is the arithmetic structure of `c` and `u` equivalent ?
if not compare_ops(c.expr, u.expr):
continue
# Is `c` translated w.r.t. `u` ?
if not c.translated(u):
continue
group.append(u)
unseen.remove(u)
group = Group(group)
if min_storage:
k = group.dimensions_translated
else:
k = group.dimensions
mapper.setdefault(k, []).append(group)
aliases = AliasMapper()
queue = list(mapper.values())
while queue:
groups = queue.pop(0)
while groups:
# For each Dimension, determine the Minimum Intervals (MI) spanning
# all of the Groups' diameters
# Example: x's largest_diameter=2 => [x[-2,0], x[-1,1], x[0,2]]
# Note: Groups that cannot evaluate their diameter are dropped
mapper = defaultdict(int)
for g in list(groups):
try:
mapper.update({d: max(mapper[d], v) for d, v in g.diameter.items()})
except ValueError:
groups.remove(g)
intervalss = {d: make_rotations_table(d, v) for d, v in mapper.items()}
# For each Group, find a rotation that is compatible with a given MI
mapper = {}
for d, intervals in intervalss.items():
# Not all groups may access all dimensions
# Example: `d=t` and groups=[Group(...[t, x]...), Group(...[time, x]...)]
impacted = [g for g in groups if d in g.dimensions]
for interval in list(intervals):
found = {g: g.find_rotation_distance(d, interval) for g in impacted}
if all(distance is not None for distance in found.values()):
# `interval` is OK !
mapper[interval] = found
break
if len(mapper) == len(intervalss):
break
# Try again with fewer groups
# Heuristic: first try retaining the larger ones
smallest = len(min(groups, key=len))
fallback = groups
groups, remainder = split(groups, lambda g: len(g) > smallest)
if groups:
queue.append(remainder)
elif len(remainder) > 1:
# No luck with the heuristic, e.g. there are two groups
# and both have the same `len`
queue.append(fallback[1:])
groups = [fallback.pop(0)]
else:
break
for g in groups:
c = g.pivot
distances = defaultdict(int, [(i.dim, v.get(g)) for i, v in mapper.items()])
# Create the basis alias
offsets = [LabeledVector([(l, v[l] + distances[l]) for l in v.labels])
for v in c.offsets]
subs = {i: i.function[[l + v.fromlabel(l, 0) for l in b]]
for i, b, v in zip(c.indexeds, c.bases, offsets)}
alias = uxreplace(c.expr, subs)
# All aliased expressions
aliaseds = [extracted[i.expr] for i in g]
# Distance of each aliased expression from the basis alias
distances = []
for i in g:
distance = [o.distance(v) for o, v in zip(i.offsets, offsets)]
distance = [(d, set(v)) for d, v in LabeledVector.transpose(*distance)]
distances.append(LabeledVector([(d, v.pop()) for d, v in distance]))
aliases.add(alias, list(mapper), aliaseds, distances)
return aliases
def choose(aliases, exprs, mapper, selector):
"""
Analyze the detected aliases and, after applying a cost model to rule out
the aliases with a bad flops/memory trade-off, inject them into the original
expressions.
"""
tot = 0
retained = AliasMapper()
# Pass 1: a set of aliasing expressions is retained only if its cost
# exceeds the mode's threshold
candidates = OrderedDict()
aliaseds = []
others = []
for e, v in aliases.items():
score = selector(e, len(v.aliaseds))
if score > 0:
candidates[e] = score
aliaseds.extend(v.aliaseds)
else:
others.append(e)
# Do not waste time if unnecessary
if not candidates:
return exprs, retained, tot
# Project the candidate aliases into exprs to determine what the new
# working set would be
mapper = {k: v for k, v in mapper.items() if v.free_symbols & set(aliaseds)}
templated = [uxreplace(e, mapper) for e in exprs]
# Pass 2: a set of aliasing expressions is retained only if the tradeoff
# between operation count reduction and working set increase is favorable
owset = wset(others + templated)
for e, v in aliases.items():
try:
score = candidates[e]
except KeyError:
score = 0
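# An alias scoring exactly 1 is borderline: it is retained only if it
# accesses at least one Function that does not already appear in the
# residual working set `owset`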
if score > 1 or \
score == 1 and max(len(wset(e)), 1) > len(wset(e) & owset):
retained[e] = v
tot += score
# Do not waste time if unnecessary
if not retained:
return exprs, retained, tot
# Substitute the chosen aliasing sub-expressions
mapper = {k: v for k, v in mapper.items() if v.free_symbols & set(retained.aliaseds)}
exprs = [uxreplace(e, mapper) for e in exprs]
return exprs, retained, tot
def lower_aliases(cluster, aliases, in_writeto, maxpar):
"""
Create a Schedule from an AliasMapper.
"""
dmapper = {}
processed = []
for alias, v in aliases.items():
imapper = {**{i.dim: i for i in v.intervals},
**{i.dim.parent: i for i in v.intervals if i.dim.is_NonlinearDerived}}
intervals = []
writeto = []
sub_iterators = {}
indicess = [[] for _ in v.distances]
for i in cluster.ispace.intervals:
try:
interval = imapper[i.dim]
except KeyError:
# E.g., `x0_blk0` or (`a[y_m+1]` => `y not in imapper`)
intervals.append(i)
continue
assert i.stamp >= interval.stamp
if not (writeto or interval != interval.zero() or in_writeto(i.dim, cluster)):
# The alias doesn't require a temporary Dimension along i.dim
intervals.append(i)
continue
assert not i.dim.is_NonlinearDerived
# `i.dim` is necessarily part of the write-to region, so
# we have to adjust the Interval's stamp. For example, consider
# `i=x[0,0]<1>` and `interval=x[-4,4]<0>`; here we need to
# use `<1>` as stamp, which is what appears in `cluster`
interval = interval.lift(i.stamp)
# We further bump the interval stamp if we were requested to trade
# fusion for more collapse-parallelism
interval = interval.lift(interval.stamp + int(maxpar))
writeto.append(interval)
intervals.append(interval)
if i.dim.is_Incr:
# Suitable IncrDimensions must be used to avoid OOB accesses.
# E.g., r[xs][ys][z] => both `xs` and `ys` must be initialized such
# that all accesses are within bounds. This requires traversing the
# hierarchy of IncrDimensions to set `xs` (`ys`) in a way that
# consecutive blocks access consecutive regions in `r` (e.g.,
# `xs=x0_blk1-x0_blk0` with `blocklevels=2`; `xs=0` with
# `blocklevels=1`, that is it degenerates in this case)
try:
d = dmapper[i.dim]
except KeyError:
dd = i.dim.parent
assert dd.is_Incr
if dd.parent.is_Incr:
# An IncrDimension in between IncrDimensions
m = i.dim.symbolic_min - i.dim.parent.symbolic_min
else:
m = 0
d = dmapper[i.dim] = IncrDimension("%ss" % i.dim.name, i.dim, m,
dd.symbolic_size, 1, dd.step)
sub_iterators[i.dim] = d
else:
d = i.dim
# Given the iteration `interval`, lower distances to indices
for distance, indices in zip(v.distances, indicess):
indices.append(d - interval.lower + distance[interval.dim])
# The alias write-to space
writeto = IterationSpace(IntervalGroup(writeto), sub_iterators)
# The alias iteration space
intervals = IntervalGroup(intervals, cluster.ispace.relations)
ispace = IterationSpace(intervals, cluster.sub_iterators, cluster.directions)
ispace = ispace.augment(sub_iterators)
processed.append(ScheduledAlias(alias, writeto, ispace, v.aliaseds, indicess))
# The [ScheduledAliases] must be ordered so as to reuse as many of the
# `cluster`'s IterationIntervals as possible in order to honor the
# write-to region. Another fundamental reason for ordering is to ensure
# deterministic code generation
processed = sorted(processed, key=lambda i: cit(cluster.ispace, i.ispace))
return Schedule(*processed, dmapper=dmapper)
def optimize_schedule(cluster, schedule, platform, sregistry, options):
"""
Rewrite the schedule for performance optimization.
"""
if options['cire-rotate']:
schedule = _optimize_schedule_rotations(schedule, sregistry)
schedule = _optimize_schedule_padding(cluster, schedule, platform)
return schedule
def _optimize_schedule_rotations(schedule, sregistry):
"""
Transform the schedule such that the tensor temporaries "rotate" along
the outermost Dimension. This trades a parallel Dimension for a smaller
working set size.
"""
# The rotations Dimension is the outermost
ridx = 0
rmapper = defaultdict(list)
processed = []
for k, group in groupby(schedule, key=lambda i: i.writeto):
g = list(group)
candidate = k[ridx]
d = candidate.dim
try:
ds = schedule.dmapper[d]
except KeyError:
# Can't do anything if `d` isn't an IncrDimension over a block
processed.extend(g)
continue
n = candidate.min_size
assert n > 0
iis = candidate.lower
iib = candidate.upper
ii = ModuloDimension('%sii' % d, ds, iis, incr=iib)
cd = CustomDimension(name='%s%s' % (d, d), symbolic_min=ii, symbolic_max=iib,
symbolic_size=n)
dsi = ModuloDimension('%si' % ds, cd, cd + ds - iis, n)
mapper = OrderedDict()
for i in g:
# Update `indicess` to use `xs0`, `xs1`, ...
mds = []
for indices in i.indicess:
v = indices[ridx]
try:
md = mapper[v]
except KeyError:
name = sregistry.make_name(prefix='%sr' % d.name)
md = mapper.setdefault(v, ModuloDimension(name, ds, v, n))
mds.append(md)
indicess = [indices[:ridx] + [md] + indices[ridx + 1:]
for md, indices in zip(mds, i.indicess)]
# Update `writeto` by switching `d` to `dsi`
intervals = k.intervals.switch(d, dsi).zero(dsi)
sub_iterators = dict(k.sub_iterators)
sub_iterators[d] = dsi
writeto = IterationSpace(intervals, sub_iterators)
# Transform `alias` by adding `i`
alias = i.alias.xreplace({d: d + cd})
# Extend `ispace` to iterate over rotations
d1 = writeto[ridx+1].dim # Note: we're by construction in-bounds here
intervals = IntervalGroup(Interval(cd, 0, 0), relations={(d, cd, d1)})
rispace = IterationSpace(intervals, {cd: dsi}, {cd: Forward})
aispace = i.ispace.zero(d)
aispace = aispace.augment({d: mds + [ii]})
ispace = IterationSpace.union(rispace, aispace)
processed.append(ScheduledAlias(alias, writeto, ispace, i.aliaseds, indicess))
# Update the rotations mapper
rmapper[d].extend(list(mapper.values()))
return Schedule(*processed, dmapper=schedule.dmapper, rmapper=rmapper)
def _optimize_schedule_padding(cluster, schedule, platform):
"""
Round up the innermost IterationInterval of the tensor temporaries' IterationSpace
to a multiple of the SIMD vector length. This is not always possible though (it
depends on how much halo is safely accessible in all read Functions).
"""
processed = []
for i in schedule:
try:
it = i.ispace.itintervals[-1]
if ROUNDABLE in cluster.properties[it.dim]:
vl = platform.simd_items_per_reg(cluster.dtype)
ispace = i.ispace.add(Interval(it.dim, 0, it.interval.size % vl))
else:
ispace = i.ispace
processed.append(ScheduledAlias(i.alias, i.writeto, ispace, i.aliaseds,
i.indicess))
except (TypeError, KeyError):
processed.append(i)
return Schedule(*processed, dmapper=schedule.dmapper, rmapper=schedule.rmapper)
def lower_schedule(cluster, schedule, sregistry, options):
"""
Turn a Schedule into a sequence of Clusters.
"""
ftemps = options['cire-ftemps']
if ftemps:
make = TempFunction
else:
# Typical case -- the user does *not* "see" the CIRE-created temporaries
make = Array
clusters = []
subs = {}
for alias, writeto, ispace, aliaseds, indicess in schedule:
# Basic info to create the temporary that will hold the alias
name = sregistry.make_name()
dtype = cluster.dtype
if writeto:
# The Dimensions defining the shape of Array
# Note: with SubDimensions, we may have the following situation:
#
# for zi = z_m + zi_ltkn; zi <= z_M - zi_rtkn; ...
# r[zi] = ...
#
# Instead of `r[zi - z_m - zi_ltkn]` we have just `r[zi]`, so we'll need
# as much room as in `zi`'s parent to avoid going OOB
# Aside from producing ugly code, the reason we do not simply shift the
# indices is that it would prevent future passes from transforming the loop bounds
# (e.g., MPI's comp/comm overlap does that)
dimensions = [d.parent if d.is_Sub else d for d in writeto.itdimensions]
# The halo must be set according to the size of writeto space
halo = [(abs(i.lower), abs(i.upper)) for i in writeto]
# The indices used to write into the Array
indices = []
for i in writeto:
try:
# E.g., `xs`
sub_iterators = writeto.sub_iterators[i.dim]
assert len(sub_iterators) == 1
indices.append(sub_iterators[0])
except KeyError:
# E.g., `z` -- a non-shifted Dimension
indices.append(i.dim - i.lower)
obj = make(name=name, dimensions=dimensions, halo=halo, dtype=dtype)
expression = Eq(obj[indices], alias)
callback = lambda idx: obj[idx]
else:
# Degenerate case: scalar expression
assert writeto.size == 0
obj = Symbol(name=name, dtype=dtype)
expression = Eq(obj, alias)
callback = lambda idx: obj
# Create the substitution rules for the aliasing expressions
subs.update({aliased: callback(indices)
for aliased, indices in zip(aliaseds, indicess)})
# Construct the `alias` DataSpace
accesses = detect_accesses(expression)
parts = {k: IntervalGroup(build_intervals(v)).add(ispace.intervals).relaxed
for k, v in accesses.items() if k}
dspace = DataSpace(cluster.dspace.intervals, parts)
# Drop or weaken parallelism if necessary
properties = dict(cluster.properties)
for d, v in cluster.properties.items():
if any(i.is_Modulo for i in ispace.sub_iterators[d]):
properties[d] = normalize_properties(v, {SEQUENTIAL})
elif d not in writeto.dimensions:
properties[d] = normalize_properties(v, {PARALLEL_IF_PVT})
# Finally, build the `alias` Cluster
clusters.append(cluster.rebuild(exprs=expression, ispace=ispace,
dspace=dspace, properties=properties))
return clusters, subs
def pick_best(variants):
"""
Use the variant score and heuristics to return the variant with the best
trade-off between operation count reduction and working set increase.
"""
best = variants.pop(0)
for i in variants:
best_flop_score, best_ws_score = best.score
if best_flop_score == 0:
best = i
continue
i_flop_score, i_ws_score = i.score
# The current heuristic is fairly basic: the variant with the smaller working
# set size increase wins, unless there's a massive reduction in operation
# count in the other one
delta = i_ws_score - best_ws_score
if (delta > 0 and i_flop_score / best_flop_score > 100) or \
(delta == 0 and i_flop_score > best_flop_score) or \
(delta < 0 and best_flop_score / i_flop_score <= 100):
best = i
schedule, exprs, _ = best
return schedule, exprs
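# A worked example of the heuristic above, with hypothetical scores: given
# variants scored (flops=50, ws=3) and (flops=6000, ws=4), the latter wins
# despite the larger working set, since 6000/50 > 100; with (flops=50, ws=3)
# and (flops=60, ws=4) the former wins, since 60/50 <= 100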
def rebuild(cluster, exprs, subs, schedule):
"""
Plug the optimized aliases into the input Cluster. This leads to creating
a new Cluster with suitable IterationSpace and DataSpace.
"""
exprs = [uxreplace(e, subs) for e in exprs]
ispace = cluster.ispace.augment(schedule.dmapper)
ispace = ispace.augment(schedule.rmapper)
accesses = detect_accesses(exprs)
parts = {k: IntervalGroup(build_intervals(v)).relaxed
for k, v in accesses.items() if k}
dspace = DataSpace(cluster.dspace.intervals, parts)
return cluster.rebuild(exprs=exprs, ispace=ispace, dspace=dspace)
# Utilities
class Candidate(object):
def __init__(self, expr, ispace, indexeds, bases, offsets):
self.expr = expr
self.shifts = ispace.intervals
self.indexeds = indexeds
self.bases = bases
self.offsets = offsets
def __repr__(self):
return "Candidate(expr=%s)" % self.expr
def translated(self, other):
"""
True if ``self`` is translated w.r.t. ``other``, False otherwise.
Examples
--------
Two candidates are translated if their bases are the same and
their offsets are pairwise translated.
c := A[i,j] op A[i,j+1] -> Toffsets = {i: [0,0], j: [0,1]}
u := A[i+1,j] op A[i+1,j+1] -> Toffsets = {i: [1,1], j: [0,1]}
Then `c` is translated w.r.t. `u` with distance `{i: 1, j: 0}`
"""
if len(self.Toffsets) != len(other.Toffsets):
return False
if len(self.bases) != len(other.bases):
return False
# Check the bases
if any(b0 != b1 for b0, b1 in zip(self.bases, other.bases)):
return False
# Check the offsets
for (d0, o0), (d1, o1) in zip(self.Toffsets, other.Toffsets):
if d0 is not d1:
return False
distance = set(o0 - o1)
if len(distance) != 1:
return False
return True
@cached_property
def Toffsets(self):
return LabeledVector.transpose(*self.offsets)
@cached_property
def dimensions(self):
return frozenset(i for i, _ in self.Toffsets)
class Group(tuple):
"""
A collection of aliasing expressions.
"""
def __repr__(self):
return "Group(%s)" % ", ".join([str(i) for i in self])
def find_rotation_distance(self, d, interval):
"""
The distance from the Group pivot of a rotation along Dimension ``d`` that
can safely iterate over the ``interval``.
"""
assert d is interval.dim
for rotation, distance in self._pivot_legal_rotations[d]:
# Does `rotation` cover the `interval` ?
if rotation.union(interval) != rotation:
continue
# Infer the `rotation`'s min_intervals from the pivot's
min_interval = self._pivot_min_intervals[d].translate(-distance)
# Does the `interval` actually cover the `rotation`'s `min_interval`?
if interval.union(min_interval) == interval:
return distance
return None
@cached_property
def Toffsets(self):
return [LabeledVector.transpose(*i) for i in zip(*[i.offsets for i in self])]
@cached_property
def diameter(self):
"""
The size of the iteration space required to evaluate all aliasing expressions
in this Group, along each Dimension.
"""
ret = defaultdict(int)
for i in self.Toffsets:
for d, v in i:
try:
distance = int(max(v) - min(v))
except TypeError:
# An entry in `v` has symbolic components, e.g. `x_m + 2`
if len(set(v)) == 1:
continue
else:
raise ValueError
ret[d] = max(ret[d], distance)
return ret
@property
def pivot(self):
"""
A deterministically chosen Candidate for this Group.
"""
return self[0]
@property
def dimensions(self):
return self.pivot.dimensions
@property
def dimensions_translated(self):
return frozenset(d for d, v in self.diameter.items() if v > 0)
@cached_property
def _pivot_legal_rotations(self):
"""
All legal rotations along each Dimension for the Group pivot.
"""
ret = {}
for d, (maxd, mini) in self._pivot_legal_shifts.items():
# Rotation size = mini (min-increment) - maxd (max-decrement)
v = mini - maxd
# Build the table of all possible rotations
m = make_rotations_table(d, v)
distances = []
for rotation in m:
# Distance of `rotation` from the pivot
distance = maxd - rotation.lower
assert distance == mini - rotation.upper
distances.append(distance)
ret[d] = list(zip(m, distances))
return ret
@cached_property
def _pivot_min_intervals(self):
"""
The minimum Interval along each Dimension such that by evaluating the
pivot, all Candidates are evaluated too.
"""
c = self.pivot
ret = defaultdict(lambda: [np.inf, -np.inf])
for i in self:
distance = [o.distance(v) for o, v in zip(i.offsets, c.offsets)]
distance = [(d, set(v)) for d, v in LabeledVector.transpose(*distance)]
for d, v in distance:
value = v.pop()
ret[d][0] = min(ret[d][0], value)
ret[d][1] = max(ret[d][1], value)
ret = {d: Interval(d, m, M) for d, (m, M) in ret.items()}
return ret
@cached_property
def _pivot_legal_shifts(self):
"""
The max decrement and min increment along each Dimension such that the
Group pivot does not go OOB.
"""
c = self.pivot
ret = defaultdict(lambda: (-np.inf, np.inf))
for i, ofs in zip(c.indexeds, c.offsets):
f = i.function
for l in ofs.labels:
# `f`'s cumulative halo size along `l`
hsize = sum(f._size_halo[l])
# Any `ofs`'s shift due to non-[0,0] iteration space
lower, upper = c.shifts[l].offsets
try:
# Assume `ofs[d]` is a number (typical case)
maxd = min(0, max(ret[l][0], -ofs[l] - lower))
mini = max(0, min(ret[l][1], hsize - ofs[l] - upper))
ret[l] = (maxd, mini)
except TypeError:
# E.g., `ofs[d] = x_m - x + 5`
ret[l] = (0, 0)
return ret
AliasedGroup = namedtuple('AliasedGroup', 'intervals aliaseds distances')
ScheduledAlias = namedtuple('ScheduledAlias', 'alias writeto ispace aliaseds indicess')
ScheduledAlias.__new__.__defaults__ = (None,) * len(ScheduledAlias._fields)
SpacePoint = namedtuple('SpacePoint', 'schedule exprs score')
class Schedule(tuple):
def __new__(cls, *items, dmapper=None, rmapper=None):
obj = super(Schedule, cls).__new__(cls, items)
obj.dmapper = dmapper or {}
obj.rmapper = rmapper or {}
return obj
class AliasMapper(OrderedDict):
def add(self, alias, intervals, aliaseds, distances):
assert len(aliaseds) == len(distances)
self[alias] = AliasedGroup(intervals, aliaseds, distances)
def update(self, aliases):
for k, v in aliases.items():
try:
v0 = self[k]
if v0.intervals != v.intervals:
raise ValueError
v0.aliaseds.extend(v.aliaseds)
v0.distances.extend(v.distances)
except KeyError:
self[k] = v
@property
def aliaseds(self):
return flatten(i.aliaseds for i in self.values())
def make_rotations_table(d, v):
"""
All possible rotations of `range(v+1)`.
"""
m = np.array([[j-i if j > i else 0 for j in range(v+1)] for i in range(v+1)])
m = (m - m.T)[::-1, :]
# Shift the table so that the middle rotation is at the top
m = np.roll(m, int(-np.floor(v/2)), axis=0)
# Turn into a more compact representation as a list of Intervals
m = [Interval(d, min(i), max(i)) for i in m]
return m
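# Illustrative sketch: the table above for `v=2`, with devito's Interval
# replaced by plain (lower, upper) pairs so that the snippet runs stand-alone.
def _demo_rotations_table(v=2):
    import numpy as np
    m = np.array([[j - i if j > i else 0 for j in range(v + 1)]
                  for i in range(v + 1)])
    m = (m - m.T)[::-1, :]
    m = np.roll(m, int(-np.floor(v / 2)), axis=0)
    # For v=2 this returns [(-1, 1), (0, 2), (-2, 0)]: the "middle"
    # rotation [-1, 1] comes first
    return [(int(row.min()), int(row.max())) for row in m]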
def cit(ispace0, ispace1):
"""
The Common IterationIntervals of two IterationSpaces.
"""
found = []
for it0, it1 in zip(ispace0.itintervals, ispace1.itintervals):
if it0 == it1:
found.append(it0)
else:
break
return tuple(found)
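# Illustrative sketch: `cit` is a longest-common-prefix over IterationIntervals.
# Same logic over plain tuples (toy stand-ins for `itintervals`):
def _demo_cit():
    its0 = ('t', 'x', 'y', 'z')
    its1 = ('t', 'x', 'z', 'y')
    found = []
    for it0, it1 in zip(its0, its1):
        if it0 == it1:
            found.append(it0)
        else:
            break
    return tuple(found)  # ('t', 'x')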
def maybe_coeff_key(grid, expr):
"""
True if `expr` could be the coefficient of an FD derivative, False otherwise.
"""
if expr.is_Number:
return True
indexeds = [i for i in expr.free_symbols if i.is_Indexed]
return any(not set(grid.dimensions) <= set(i.function.dimensions) for i in indexeds)
def wset(exprs):
"""
Extract the working set out of a set of equations.
"""
return {i.function for i in flatten([e.free_symbols for e in as_tuple(exprs)])
if i.function.is_AbstractFunction}
def potential_max_deriv_order(exprs):
"""
The maximum FD derivative order in a list of expressions.
"""
# NOTE: e might propagate the Derivative(...) information down from the
# symbolic language, but users may do crazy things and write their own custom
# expansions "by hand" (i.e., not resorting to Derivative(...)), hence instead
# of looking for Derivative(...) we use the following heuristic:
# add(mul, mul, ...) -> stems from first order derivative
# add(mul(add(mul, mul, ...), ...), ...) -> stems from second order derivative
# ...
nadds = lambda e: (int(e.is_Add) +
max([nadds(a) for a in e.args], default=0) if not q_leaf(e) else 0)
return max([nadds(e) for e in exprs], default=0)
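# Illustrative sketch: the Add-nesting heuristic above on plain SymPy
# expressions. `q_leaf` is approximated with `is_Atom`, which is an
# assumption -- devito's actual predicate differs.
def _demo_deriv_order():
    import sympy
    a, b, c, d = sympy.symbols('a b c d')
    q_leaf = lambda e: e.is_Atom
    nadds = lambda e: ((int(e.is_Add) +
                        max([nadds(x) for x in e.args], default=0))
                       if not q_leaf(e) else 0)
    first = a*b + c*d             # add(mul, mul) -> depth 1
    second = (a*b + c*d)*b + a*c  # add(mul(add(...), ...), mul) -> depth 2
    return nadds(first), nadds(second)  # (1, 2)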
def search_potential_deriv(expr, n, c=0):
"""
Retrieve the expressions at depth `n` that potentially stem from FD derivatives.
"""
assert n >= c >= 0
if q_leaf(expr) or expr.is_Pow:
return []
elif expr.is_Mul:
if c == n:
return [expr]
else:
return flatten([search_potential_deriv(a, n, c+1) for a in expr.args])
else:
return flatten([search_potential_deriv(a, n, c) for a in expr.args])
|
translated
|
True if ``self`` is translated w.r.t. ``other``, False otherwise.
Examples
--------
Two candidates are translated if their bases are the same and
their offsets are pairwise translated.
c := A[i,j] op A[i,j+1] -> Toffsets = {i: [0,0], j: [0,1]}
u := A[i+1,j] op A[i+1,j+1] -> Toffsets = {i: [1,1], j: [0,1]}
Then `c` is translated w.r.t. `u` with distance `{i: 1, j: 0}`
|
from collections import OrderedDict, defaultdict, namedtuple
from functools import partial
from itertools import groupby
from cached_property import cached_property
import numpy as np
from devito.ir import (SEQUENTIAL, PARALLEL, PARALLEL_IF_PVT, ROUNDABLE, DataSpace,
Forward, IterationInstance, IterationSpace, Interval,
IntervalGroup, LabeledVector, Context, detect_accesses,
build_intervals, normalize_properties)
from devito.passes.clusters.utils import timed_pass
from devito.symbolics import (Uxmapper, compare_ops, estimate_cost, q_constant,
q_leaf, retrieve_indexed, search, uxreplace)
from devito.tools import as_tuple, flatten, split
from devito.types import (Array, TempFunction, Eq, Symbol, ModuloDimension,
CustomDimension, IncrDimension)
__all__ = ['cire']
@timed_pass(name='cire')
def cire(clusters, mode, sregistry, options, platform):
"""
Cross-iteration redundancies elimination.
Parameters
----------
clusters : list of Cluster
Input Clusters, subject of the optimization pass.
mode : str
The transformation mode. Accepted: ['invariants', 'sops'].
* 'invariants' is for sub-expressions that are invariant w.r.t. one or
more Dimensions.
* 'sops' stands for sums-of-products, that is redundancies are searched
across all expressions in sum-of-product form.
sregistry : SymbolRegistry
The symbol registry, to create unique temporary names.
options : dict
The optimization options.
Accepted: ['min-storage', 'cire-maxpar', 'cire-rotate', 'cire-maxalias'].
* 'min-storage': if True, the pass will try to minimize the amount of
storage introduced for the tensor temporaries. This might also reduce
the operation count. On the other hand, this might affect fusion and
therefore data locality. Defaults to False (legacy).
* 'cire-maxpar': if True, privilege parallelism over working set size,
that is the pass will try to create as many parallel loops as possible,
even though this will require more space (Dimensions) for the temporaries.
Defaults to False.
* 'cire-rotate': if True, the pass will use modulo indexing for the
outermost Dimension iterated over by the temporaries. This will sacrifice
a parallel loop for a reduced working set size. Defaults to False (legacy).
* 'cire-maxalias': if True, capture the largest redundancies. This will
minimize the flop count while maximizing the number of tensor temporaries,
thus increasing the working set size.
platform : Platform
The underlying platform. Used to optimize the shape of the introduced
tensor symbols.
Examples
--------
1) 'invariants'. Here's an expensive expression invariant w.r.t. `t`
t0 = (cos(a[x,y,z])*sin(b[x,y,z]))*c[t,x,y,z]
which after CIRE becomes
t1[x,y,z] = cos(a[x,y,z])*sin(b[x,y,z])
t0 = t1[x,y,z]*c[t,x,y,z]
2) 'sops'. Below we see two expressions in sum-of-product form (in this
case, the sum degenerates to a single product).
t0 = 2.0*a[x,y,z]*b[x,y,z]
t1 = 3.0*a[x,y,z+1]*b[x,y,z+1]
CIRE detects that these two expressions are actually redundant and rewrites
them as:
t2[x,y,z] = a[x,y,z]*b[x,y,z]
t0 = 2.0*t2[x,y,z]
t1 = 3.0*t2[x,y,z+1]
"""
if mode == 'invariants':
space = ('inv-basic', 'inv-compound')
elif mode in ('sops',):
space = (mode,)
else:
assert False, "Unknown CIRE mode `%s`" % mode
processed = []
for c in clusters:
# We don't care about sparse Clusters. Their computational cost is
# negligible and processing all of them would only increase compilation
# time and potentially make the generated code more chaotic
if not c.is_dense:
processed.append(c)
continue
# Some of the CIRE transformers need to look inside all scopes
# surrounding `c` to perform data dependencies analysis
context = Context(c).process(clusters)
# Applying CIRE may change `c` as well as creating one or more new Clusters
transformed = _cire(c, context, space, sregistry, options, platform)
processed.extend(transformed)
return processed
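# Illustrative usage sketch (assumes a standard Devito install): users never
# call `cire` directly; it runs as part of the default optimization pipeline
# when an Operator is built. A high-order stencil such as the Laplacian
# exposes the sums-of-products redundancies targeted by the 'sops' mode.
def _demo_cire_usage():
    from devito import Grid, TimeFunction, Eq, Operator
    grid = Grid(shape=(64, 64, 64))
    u = TimeFunction(name='u', grid=grid, space_order=8)
    op = Operator(Eq(u.forward, u.laplace))
    return op  # the generated code typically contains CIRE-created temporaries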
def _cire(cluster, context, space, sregistry, options, platform):
# Construct the space of variants
variants = [modes[mode](sregistry, options).make_schedule(cluster, context)
for mode in space]
if not any(i.schedule for i in variants):
return [cluster]
# Pick the variant with the highest score, that is the variant with the best
# trade-off between operation count reduction and working set size increase
schedule, exprs = pick_best(variants)
# Schedule -> [Clusters]
schedule = optimize_schedule(cluster, schedule, platform, sregistry, options)
clusters, subs = lower_schedule(cluster, schedule, sregistry, options)
clusters.append(rebuild(cluster, exprs, subs, schedule))
return clusters
class Cire(object):
"""
Base class for CIRE transformers.
"""
optname = None
mode = None
def __init__(self, sregistry, options):
self.sregistry = sregistry
self._opt_minstorage = options['min-storage']
self._opt_mincost = options['cire-mincost'][self.optname]
self._opt_maxpar = options['cire-maxpar']
self._opt_maxalias = options['cire-maxalias']
def make_schedule(self, cluster, context):
# Capture aliases within `exprs`
aliases = AliasMapper()
score = 0
exprs = cluster.exprs
ispace = cluster.ispace
for n in range(self._nrepeats(cluster)):
# Extract potentially aliasing expressions
mapper = self._extract(exprs, context, n)
# Search aliasing expressions
found = collect(mapper.extracted, ispace, self._opt_minstorage)
# Choose the aliasing expressions with a good flops/memory trade-off
exprs, chosen, pscore = choose(found, exprs, mapper, self._selector)
aliases.update(chosen)
score += pscore
# AliasMapper -> Schedule
schedule = lower_aliases(cluster, aliases, self._in_writeto, self._opt_maxpar)
# The actual score is a 2-tuple <flop-reduction-score, working-set-score>
score = (score, len(aliases))
return SpacePoint(schedule, exprs, score)
def _make_symbol(self):
return Symbol(name=self.sregistry.make_name('dummy'))
def _nrepeats(self, cluster):
raise NotImplementedError
def _extract(self, exprs, context, n):
raise NotImplementedError
def _in_writeto(self, dim, cluster):
raise NotImplementedError
def _selector(self, e, naliases):
raise NotImplementedError
class CireInvariants(Cire):
optname = 'invariants'
def _nrepeats(self, cluster):
return 1
def _rule(self, e):
return (e.is_Function or
(e.is_Pow and e.exp.is_Number and e.exp < 1))
def _extract(self, exprs, context, n):
mapper = Uxmapper()
for prefix, clusters in context.items():
if not prefix:
continue
exclude = set().union(*[c.scope.writes for c in clusters])
exclude.add(prefix[-1].dim)
for e in exprs:
for i in search(e, self._rule, 'all', 'bfs_first_hit'):
if {a.function for a in i.free_symbols} & exclude:
continue
mapper.add(i, self._make_symbol)
return mapper
def _in_writeto(self, dim, cluster):
return PARALLEL in cluster.properties[dim]
def _selector(self, e, naliases):
if all(i.function.is_Symbol for i in e.free_symbols):
# E.g., `dt**(-2)`
mincost = self._opt_mincost['scalar']
else:
mincost = self._opt_mincost['tensor']
return estimate_cost(e, True)*naliases // mincost
class CireInvariantsBasic(CireInvariants):
mode = 'inv-basic'
class CireInvariantsCompound(CireInvariants):
mode = 'inv-compound'
def _extract(self, exprs, context, n):
extracted = super()._extract(exprs, context, n).extracted
rule = lambda e: any(a in extracted for a in e.args)
mapper = Uxmapper()
for e in exprs:
for i in search(e, rule, 'all', 'dfs'):
if not i.is_commutative:
continue
key = lambda a: a in extracted
terms, others = split(i.args, key)
mapper.add(i, self._make_symbol, terms)
return mapper
class CireSOPS(Cire):
optname = 'sops'
mode = 'sops'
def _nrepeats(self, cluster):
# The `nrepeats` is calculated such that we analyze all potential derivatives
# in `cluster`
return potential_max_deriv_order(cluster.exprs)
def _extract(self, exprs, context, n):
# Forbid CIRE involving Dimension-independent dependencies, e.g.:
# r0 = ...
# u[x, y] = ... r0*a[x, y] ...
# NOTE: if one uses the DSL in a conventional way and sticks to the default
# compilation pipelines where CSE always happens after CIRE, then `exclude`
# will always be empty
exclude = {i.source.indexed for i in context[None].scope.d_flow.independent()}
mapper = Uxmapper()
for e in exprs:
for i in search_potential_deriv(e, n):
if i.free_symbols & exclude:
continue
key = lambda a: a.is_Add
terms, others = split(i.args, key)
if self._opt_maxalias:
# Treat `e` as an FD expression and pull out the derivative
# coefficient from `i`
# Note: typically derivative coefficients are numbers, but
# sometimes they could be provided in symbolic form through an
# arbitrary Function. In the latter case, we rely on the
# heuristic that such Functions basically never span the whole
# grid, but rather a single Grid dimension (e.g., `c[z, n]` for a
# stencil of diameter `n` along `z`)
if e.grid is not None and terms:
key = partial(maybe_coeff_key, e.grid)
others, more_terms = split(others, key)
terms += more_terms
mapper.add(i, self._make_symbol, terms)
return mapper
def _in_writeto(self, dim, cluster):
return self._opt_maxpar and PARALLEL in cluster.properties[dim]
def _selector(self, e, naliases):
if naliases <= 1:
return 0
else:
return estimate_cost(e, True)*naliases // self._opt_mincost
modes = {
CireInvariantsBasic.mode: CireInvariantsBasic,
CireInvariantsCompound.mode: CireInvariantsCompound,
CireSOPS.mode: CireSOPS
}
def collect(extracted, ispace, min_storage):
"""
Find groups of aliasing expressions.
We shall introduce the following (loose) terminology:
* A ``terminal`` is the leaf of a mathematical operation. Terminals
can be numbers (n), literals (l), or Indexeds (I).
* ``R`` is the relaxation operator := ``R(n) = n``, ``R(l) = l``,
``R(I) = J``, where ``J`` has the same base as ``I`` but with all
offsets stripped away. For example, ``R(a[i+2,j-1]) = a[i,j]``.
* A ``relaxed expression`` is an expression in which all of the
terminals are relaxed.
Now we define the concept of aliasing. We say that an expression A
aliases an expression B if:
* ``R(A) == R(B)``
* all pairwise Indexeds in A and B access memory locations at a
fixed constant distance along each Dimension.
For example, consider the following expressions:
* a[i+1] + b[i+1]
* a[i+1] + b[j+1]
* a[i] + c[i]
* a[i+2] - b[i+2]
* a[i+2] + b[i]
* a[i-1] + b[i-1]
Out of the expressions above, the following alias to `a[i] + b[i]`:
* a[i+1] + b[i+1] : same operands and operations, distance along i: 1
* a[i-1] + b[i-1] : same operands and operations, distance along i: -1
Whereas the following do not:
* a[i+1] + b[j+1] : because at least one index differs
* a[i] + c[i] : because at least one of the operands differs
* a[i+2] - b[i+2] : because at least one operation differs
* a[i+2] + b[i] : because the distances along ``i`` differ (+2 and +0)
"""
# Find the potential aliases
found = []
for expr in extracted:
assert not expr.is_Equality
indexeds = retrieve_indexed(expr)
bases = []
offsets = []
for i in indexeds:
ii = IterationInstance(i)
if ii.is_irregular:
break
base = []
offset = []
for e, ai in zip(ii, ii.aindices):
if q_constant(e):
base.append(e)
else:
base.append(ai)
offset.append((ai, e - ai))
bases.append(tuple(base))
offsets.append(LabeledVector(offset))
if not indexeds or len(bases) == len(indexeds):
found.append(Candidate(expr, ispace, indexeds, bases, offsets))
# Create groups of aliasing expressions
mapper = OrderedDict()
unseen = list(found)
while unseen:
c = unseen.pop(0)
group = [c]
for u in list(unseen):
# Is the arithmetic structure of `c` and `u` equivalent ?
if not compare_ops(c.expr, u.expr):
continue
# Is `c` translated w.r.t. `u` ?
if not c.translated(u):
continue
group.append(u)
unseen.remove(u)
group = Group(group)
if min_storage:
k = group.dimensions_translated
else:
k = group.dimensions
mapper.setdefault(k, []).append(group)
aliases = AliasMapper()
queue = list(mapper.values())
while queue:
groups = queue.pop(0)
while groups:
# For each Dimension, determine the Minimum Intervals (MI) spanning
# all of the Groups' diameters
# Example: x's largest_diameter=2 => [x[-2,0], x[-1,1], x[0,2]]
# Note: Groups that cannot evaluate their diameter are dropped
mapper = defaultdict(int)
for g in list(groups):
try:
mapper.update({d: max(mapper[d], v) for d, v in g.diameter.items()})
except ValueError:
groups.remove(g)
intervalss = {d: make_rotations_table(d, v) for d, v in mapper.items()}
# For each Group, find a rotation that is compatible with a given MI
mapper = {}
for d, intervals in intervalss.items():
# Not all groups may access all dimensions
# Example: `d=t` and groups=[Group(...[t, x]...), Group(...[time, x]...)]
impacted = [g for g in groups if d in g.dimensions]
for interval in list(intervals):
found = {g: g.find_rotation_distance(d, interval) for g in impacted}
if all(distance is not None for distance in found.values()):
# `interval` is OK !
mapper[interval] = found
break
if len(mapper) == len(intervalss):
break
# Try again with fewer groups
# Heuristic: first try retaining the larger ones
smallest = len(min(groups, key=len))
fallback = groups
groups, remainder = split(groups, lambda g: len(g) > smallest)
if groups:
queue.append(remainder)
elif len(remainder) > 1:
# No luck with the heuristic, e.g. there are two groups
# and both have the same `len`
queue.append(fallback[1:])
groups = [fallback.pop(0)]
else:
break
for g in groups:
c = g.pivot
distances = defaultdict(int, [(i.dim, v.get(g)) for i, v in mapper.items()])
# Create the basis alias
offsets = [LabeledVector([(l, v[l] + distances[l]) for l in v.labels])
for v in c.offsets]
subs = {i: i.function[[l + v.fromlabel(l, 0) for l in b]]
for i, b, v in zip(c.indexeds, c.bases, offsets)}
alias = uxreplace(c.expr, subs)
# All aliased expressions
aliaseds = [extracted[i.expr] for i in g]
# Distance of each aliased expression from the basis alias
distances = []
for i in g:
distance = [o.distance(v) for o, v in zip(i.offsets, offsets)]
distance = [(d, set(v)) for d, v in LabeledVector.transpose(*distance)]
distances.append(LabeledVector([(d, v.pop()) for d, v in distance]))
aliases.add(alias, list(mapper), aliaseds, distances)
return aliases
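# Illustrative sketch of the docstring's relaxation operator `R` and aliasing
# test, with (name, offsets) tuples as toy stand-ins for Indexeds; the
# operation-structure check (`compare_ops`) is assumed to have passed already:
def _demo_aliasing():
    relax = lambda access: (access[0], tuple(0 for _ in access[1]))
    def aliases(A, B):
        # Same relaxed accesses, pairwise...
        if [relax(a) for a in A] != [relax(b) for b in B]:
            return False
        # ... and a single constant distance along each Dimension
        distances = {tuple(oa - ob for oa, ob in zip(a[1], b[1]))
                     for a, b in zip(A, B)}
        return len(distances) == 1
    A = [('a', (1,)), ('b', (1,))]  # a[i+1] + b[i+1]
    B = [('a', (0,)), ('b', (0,))]  # a[i] + b[i]
    C = [('a', (2,)), ('b', (0,))]  # a[i+2] + b[i]
    return aliases(A, B), aliases(A, C)  # (True, False)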
def choose(aliases, exprs, mapper, selector):
"""
Analyze the detected aliases and, after applying a cost model to rule out
the aliases with a bad flops/memory trade-off, inject them into the original
expressions.
"""
tot = 0
retained = AliasMapper()
# Pass 1: a set of aliasing expressions is retained only if its cost
# exceeds the mode's threshold
candidates = OrderedDict()
aliaseds = []
others = []
for e, v in aliases.items():
score = selector(e, len(v.aliaseds))
if score > 0:
candidates[e] = score
aliaseds.extend(v.aliaseds)
else:
others.append(e)
# Do not waste time if unnecessary
if not candidates:
return exprs, retained, tot
# Project the candidate aliases into exprs to determine what the new
# working set would be
mapper = {k: v for k, v in mapper.items() if v.free_symbols & set(aliaseds)}
templated = [uxreplace(e, mapper) for e in exprs]
# Pass 2: a set of aliasing expressions is retained only if the tradeoff
# between operation count reduction and working set increase is favorable
owset = wset(others + templated)
for e, v in aliases.items():
try:
score = candidates[e]
except KeyError:
score = 0
if score > 1 or \
(score == 1 and max(len(wset(e)), 1) > len(wset(e) & owset)):
retained[e] = v
tot += score
# Do not waste time if unnecessary
if not retained:
return exprs, retained, tot
# Substitute the chosen aliasing sub-expressions
mapper = {k: v for k, v in mapper.items() if v.free_symbols & set(retained.aliaseds)}
exprs = [uxreplace(e, mapper) for e in exprs]
return exprs, retained, tot
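# Illustrative sketch of pass 1 above, with a stub selector in the style of
# the Cire subclasses' `_selector` (cost * naliases // threshold); all names
# and numbers here are made up:
def _demo_choose_pass1(threshold=10):
    aliases = {'cos(a)*sin(b)': (12, 4),  # (estimated cost, naliases)
               'a + b': (1, 2)}
    selector = lambda cost, naliases: cost*naliases // threshold
    # Only candidates whose score exceeds the threshold survive
    return {e: selector(*v) for e, v in aliases.items() if selector(*v) > 0}
    # -> {'cos(a)*sin(b)': 4}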
def lower_aliases(cluster, aliases, in_writeto, maxpar):
"""
Create a Schedule from an AliasMapper.
"""
dmapper = {}
processed = []
for alias, v in aliases.items():
imapper = {**{i.dim: i for i in v.intervals},
**{i.dim.parent: i for i in v.intervals if i.dim.is_NonlinearDerived}}
intervals = []
writeto = []
sub_iterators = {}
indicess = [[] for _ in v.distances]
for i in cluster.ispace.intervals:
try:
interval = imapper[i.dim]
except KeyError:
# E.g., `x0_blk0` or (`a[y_m+1]` => `y not in imapper`)
intervals.append(i)
continue
assert i.stamp >= interval.stamp
if not (writeto or interval != interval.zero() or in_writeto(i.dim, cluster)):
# The alias doesn't require a temporary Dimension along i.dim
intervals.append(i)
continue
assert not i.dim.is_NonlinearDerived
# `i.dim` is necessarily part of the write-to region, so
# we have to adjust the Interval's stamp. For example, consider
# `i=x[0,0]<1>` and `interval=x[-4,4]<0>`; here we need to
# use `<1>` as stamp, which is what appears in `cluster`
interval = interval.lift(i.stamp)
# We further bump the interval stamp if we were requested to trade
# fusion for more collapse-parallelism
interval = interval.lift(interval.stamp + int(maxpar))
writeto.append(interval)
intervals.append(interval)
if i.dim.is_Incr:
# Suitable IncrDimensions must be used to avoid OOB accesses.
# E.g., r[xs][ys][z] => both `xs` and `ys` must be initialized such
# that all accesses are within bounds. This requires traversing the
# hierarchy of IncrDimensions to set `xs` (`ys`) in a way that
# consecutive blocks access consecutive regions in `r` (e.g.,
# `xs=x0_blk1-x0_blk0` with `blocklevels=2`; `xs=0` with
# `blocklevels=1`, that is, it degenerates in this case)
try:
d = dmapper[i.dim]
except KeyError:
dd = i.dim.parent
assert dd.is_Incr
if dd.parent.is_Incr:
# An IncrDimension in between IncrDimensions
m = i.dim.symbolic_min - i.dim.parent.symbolic_min
else:
m = 0
d = dmapper[i.dim] = IncrDimension("%ss" % i.dim.name, i.dim, m,
dd.symbolic_size, 1, dd.step)
sub_iterators[i.dim] = d
else:
d = i.dim
# Given the iteration `interval`, lower distances to indices
for distance, indices in zip(v.distances, indicess):
indices.append(d - interval.lower + distance[interval.dim])
# The alias write-to space
writeto = IterationSpace(IntervalGroup(writeto), sub_iterators)
# The alias iteration space
intervals = IntervalGroup(intervals, cluster.ispace.relations)
ispace = IterationSpace(intervals, cluster.sub_iterators, cluster.directions)
ispace = ispace.augment(sub_iterators)
processed.append(ScheduledAlias(alias, writeto, ispace, v.aliaseds, indicess))
# The [ScheduledAliases] must be ordered so as to reuse as many of the
# `cluster`'s IterationIntervals as possible in order to honor the
# write-to region. Another fundamental reason for ordering is to ensure
# deterministic code generation
processed = sorted(processed, key=lambda i: cit(cluster.ispace, i.ispace))
return Schedule(*processed, dmapper=dmapper)
def optimize_schedule(cluster, schedule, platform, sregistry, options):
"""
Rewrite the schedule for performance optimization.
"""
if options['cire-rotate']:
schedule = _optimize_schedule_rotations(schedule, sregistry)
schedule = _optimize_schedule_padding(cluster, schedule, platform)
return schedule
def _optimize_schedule_rotations(schedule, sregistry):
"""
Transform the schedule such that the tensor temporaries "rotate" along
the outermost Dimension. This trades a parallel Dimension for a smaller
working set size.
"""
# The rotations Dimension is the outermost
ridx = 0
rmapper = defaultdict(list)
processed = []
for k, group in groupby(schedule, key=lambda i: i.writeto):
g = list(group)
candidate = k[ridx]
d = candidate.dim
try:
ds = schedule.dmapper[d]
except KeyError:
# Can't do anything if `d` isn't an IncrDimension over a block
processed.extend(g)
continue
n = candidate.min_size
assert n > 0
iis = candidate.lower
iib = candidate.upper
ii = ModuloDimension('%sii' % d, ds, iis, incr=iib)
cd = CustomDimension(name='%s%s' % (d, d), symbolic_min=ii, symbolic_max=iib,
symbolic_size=n)
dsi = ModuloDimension('%si' % ds, cd, cd + ds - iis, n)
mapper = OrderedDict()
for i in g:
# Update `indicess` to use `xs0`, `xs1`, ...
mds = []
for indices in i.indicess:
v = indices[ridx]
try:
md = mapper[v]
except KeyError:
name = sregistry.make_name(prefix='%sr' % d.name)
md = mapper.setdefault(v, ModuloDimension(name, ds, v, n))
mds.append(md)
indicess = [indices[:ridx] + [md] + indices[ridx + 1:]
for md, indices in zip(mds, i.indicess)]
# Update `writeto` by switching `d` to `dsi`
intervals = k.intervals.switch(d, dsi).zero(dsi)
sub_iterators = dict(k.sub_iterators)
sub_iterators[d] = dsi
writeto = IterationSpace(intervals, sub_iterators)
# Transform `alias` by adding `cd`
alias = i.alias.xreplace({d: d + cd})
# Extend `ispace` to iterate over rotations
d1 = writeto[ridx+1].dim # Note: we're by construction in-bounds here
intervals = IntervalGroup(Interval(cd, 0, 0), relations={(d, cd, d1)})
rispace = IterationSpace(intervals, {cd: dsi}, {cd: Forward})
aispace = i.ispace.zero(d)
aispace = aispace.augment({d: mds + [ii]})
ispace = IterationSpace.union(rispace, aispace)
processed.append(ScheduledAlias(alias, writeto, ispace, i.aliaseds, indicess))
# Update the rotations mapper
rmapper[d].extend(list(mapper.values()))
return Schedule(*processed, dmapper=schedule.dmapper, rmapper=rmapper)
def _optimize_schedule_padding(cluster, schedule, platform):
"""
Round up the innermost IterationInterval of the tensor temporaries IterationSpace
to a multiple of the SIMD vector length. This is not always possible though (it
depends on how much halo is safely accessible in all read Functions).
"""
processed = []
for i in schedule:
try:
it = i.ispace.itintervals[-1]
if ROUNDABLE in cluster.properties[it.dim]:
vl = platform.simd_items_per_reg(cluster.dtype)
ispace = i.ispace.add(Interval(it.dim, 0, it.interval.size % vl))
else:
ispace = i.ispace
processed.append(ScheduledAlias(i.alias, i.writeto, ispace, i.aliaseds,
i.indicess))
except (TypeError, KeyError):
processed.append(i)
return Schedule(*processed, dmapper=schedule.dmapper, rmapper=schedule.rmapper)
def lower_schedule(cluster, schedule, sregistry, options):
"""
Turn a Schedule into a sequence of Clusters.
"""
ftemps = options['cire-ftemps']
if ftemps:
make = TempFunction
else:
# Typical case -- the user does *not* "see" the CIRE-created temporaries
make = Array
clusters = []
subs = {}
for alias, writeto, ispace, aliaseds, indicess in schedule:
# Basic info to create the temporary that will hold the alias
name = sregistry.make_name()
dtype = cluster.dtype
if writeto:
# The Dimensions defining the shape of Array
# Note: with SubDimensions, we may have the following situation:
#
# for zi = z_m + zi_ltkn; zi <= z_M - zi_rtkn; ...
# r[zi] = ...
#
# Instead of `r[zi - z_m - zi_ltkn]` we have just `r[zi]`, so we'll need
# as much room as in `zi`'s parent to avoid going OOB
# Aside from ugly generated code, the reason we do not rather shift the
# indices is that it prevents future passes to transform the loop bounds
# (e.g., MPI's comp/comm overlap does that)
dimensions = [d.parent if d.is_Sub else d for d in writeto.itdimensions]
# The halo must be set according to the size of writeto space
halo = [(abs(i.lower), abs(i.upper)) for i in writeto]
# The indices used to write into the Array
indices = []
for i in writeto:
try:
# E.g., `xs`
sub_iterators = writeto.sub_iterators[i.dim]
assert len(sub_iterators) == 1
indices.append(sub_iterators[0])
except KeyError:
# E.g., `z` -- a non-shifted Dimension
indices.append(i.dim - i.lower)
obj = make(name=name, dimensions=dimensions, halo=halo, dtype=dtype)
expression = Eq(obj[indices], alias)
callback = lambda idx: obj[idx]
else:
# Degenerate case: scalar expression
assert writeto.size == 0
obj = Symbol(name=name, dtype=dtype)
expression = Eq(obj, alias)
callback = lambda idx: obj
# Create the substitution rules for the aliasing expressions
subs.update({aliased: callback(indices)
for aliased, indices in zip(aliaseds, indicess)})
# Construct the `alias` DataSpace
accesses = detect_accesses(expression)
parts = {k: IntervalGroup(build_intervals(v)).add(ispace.intervals).relaxed
for k, v in accesses.items() if k}
dspace = DataSpace(cluster.dspace.intervals, parts)
# Drop or weaken parallelism if necessary
properties = dict(cluster.properties)
for d, v in cluster.properties.items():
if any(i.is_Modulo for i in ispace.sub_iterators[d]):
properties[d] = normalize_properties(v, {SEQUENTIAL})
elif d not in writeto.dimensions:
properties[d] = normalize_properties(v, {PARALLEL_IF_PVT})
# Finally, build the `alias` Cluster
clusters.append(cluster.rebuild(exprs=expression, ispace=ispace,
dspace=dspace, properties=properties))
return clusters, subs
def pick_best(variants):
"""
Use the variant score and heuristics to return the variant with the best
trade-off between operation count reduction and working set increase.
"""
best = variants.pop(0)
for i in variants:
best_flop_score, best_ws_score = best.score
if best_flop_score == 0:
best = i
continue
i_flop_score, i_ws_score = i.score
# The current heuristic is fairly basic: the one with the smaller working
# set size increase wins, unless there's a massive reduction in operation
# count in the other one
delta = i_ws_score - best_ws_score
if (delta > 0 and i_flop_score / best_flop_score > 100) or \
(delta == 0 and i_flop_score > best_flop_score) or \
(delta < 0 and best_flop_score / i_flop_score <= 100):
best = i
schedule, exprs, _ = best
return schedule, exprs
def rebuild(cluster, exprs, subs, schedule):
"""
Plug the optimized aliases into the input Cluster. This leads to creating
a new Cluster with suitable IterationSpace and DataSpace.
"""
exprs = [uxreplace(e, subs) for e in exprs]
ispace = cluster.ispace.augment(schedule.dmapper)
ispace = ispace.augment(schedule.rmapper)
accesses = detect_accesses(exprs)
parts = {k: IntervalGroup(build_intervals(v)).relaxed
for k, v in accesses.items() if k}
dspace = DataSpace(cluster.dspace.intervals, parts)
return cluster.rebuild(exprs=exprs, ispace=ispace, dspace=dspace)
# Utilities
class Candidate(object):
def __init__(self, expr, ispace, indexeds, bases, offsets):
self.expr = expr
self.shifts = ispace.intervals
self.indexeds = indexeds
self.bases = bases
self.offsets = offsets
def __repr__(self):
return "Candidate(expr=%s)" % self.expr
# MASKED: translated function (lines 895-927)
@cached_property
def Toffsets(self):
return LabeledVector.transpose(*self.offsets)
@cached_property
def dimensions(self):
return frozenset(i for i, _ in self.Toffsets)
class Group(tuple):
"""
A collection of aliasing expressions.
"""
def __repr__(self):
return "Group(%s)" % ", ".join([str(i) for i in self])
def find_rotation_distance(self, d, interval):
"""
The distance from the Group pivot of a rotation along Dimension ``d`` that
can safely iterate over the ``interval``.
"""
assert d is interval.dim
for rotation, distance in self._pivot_legal_rotations[d]:
# Does `rotation` cover the `interval` ?
if rotation.union(interval) != rotation:
continue
# Infer the `rotation`'s min_intervals from the pivot's
min_interval = self._pivot_min_intervals[d].translate(-distance)
# Does the `interval` actually cover the `rotation`'s `min_interval`?
if interval.union(min_interval) == interval:
return distance
return None
@cached_property
def Toffsets(self):
return [LabeledVector.transpose(*i) for i in zip(*[i.offsets for i in self])]
@cached_property
def diameter(self):
"""
The size of the iteration space required to evaluate all aliasing expressions
in this Group, along each Dimension.
"""
ret = defaultdict(int)
for i in self.Toffsets:
for d, v in i:
try:
distance = int(max(v) - min(v))
except TypeError:
# An entry in `v` has symbolic components, e.g. `x_m + 2`
if len(set(v)) == 1:
continue
else:
raise ValueError
ret[d] = max(ret[d], distance)
return ret
@property
def pivot(self):
"""
A deterministically chosen Candidate for this Group.
"""
return self[0]
@property
def dimensions(self):
return self.pivot.dimensions
@property
def dimensions_translated(self):
return frozenset(d for d, v in self.diameter.items() if v > 0)
@cached_property
def _pivot_legal_rotations(self):
"""
All legal rotations along each Dimension for the Group pivot.
"""
ret = {}
for d, (maxd, mini) in self._pivot_legal_shifts.items():
# Rotation size = mini (min-increment) - maxd (max-decrement)
v = mini - maxd
# Build the table of all possible rotations
m = make_rotations_table(d, v)
distances = []
for rotation in m:
# Distance of `rotation` from the pivot
distance = maxd - rotation.lower
assert distance == mini - rotation.upper
distances.append(distance)
ret[d] = list(zip(m, distances))
return ret
@cached_property
def _pivot_min_intervals(self):
"""
The minimum Interval along each Dimension such that by evaluating the
pivot, all Candidates are evaluated too.
"""
c = self.pivot
ret = defaultdict(lambda: [np.inf, -np.inf])
for i in self:
distance = [o.distance(v) for o, v in zip(i.offsets, c.offsets)]
distance = [(d, set(v)) for d, v in LabeledVector.transpose(*distance)]
for d, v in distance:
value = v.pop()
ret[d][0] = min(ret[d][0], value)
ret[d][1] = max(ret[d][1], value)
ret = {d: Interval(d, m, M) for d, (m, M) in ret.items()}
return ret
@cached_property
def _pivot_legal_shifts(self):
"""
The max decrement and min increment along each Dimension such that the
Group pivot does not go OOB.
"""
c = self.pivot
ret = defaultdict(lambda: (-np.inf, np.inf))
for i, ofs in zip(c.indexeds, c.offsets):
f = i.function
for l in ofs.labels:
# `f`'s cumulative halo size along `l`
hsize = sum(f._size_halo[l])
# Any `ofs`'s shift due to non-[0,0] iteration space
lower, upper = c.shifts[l].offsets
try:
# Assume `ofs[d]` is a number (typical case)
maxd = min(0, max(ret[l][0], -ofs[l] - lower))
mini = max(0, min(ret[l][1], hsize - ofs[l] - upper))
ret[l] = (maxd, mini)
except TypeError:
# E.g., `ofs[d] = x_m - x + 5`
ret[l] = (0, 0)
return ret
AliasedGroup = namedtuple('AliasedGroup', 'intervals aliaseds distances')
ScheduledAlias = namedtuple('ScheduledAlias', 'alias writeto ispace aliaseds indicess')
ScheduledAlias.__new__.__defaults__ = (None,) * len(ScheduledAlias._fields)
SpacePoint = namedtuple('SpacePoint', 'schedule exprs score')
class Schedule(tuple):
def __new__(cls, *items, dmapper=None, rmapper=None):
obj = super(Schedule, cls).__new__(cls, items)
obj.dmapper = dmapper or {}
obj.rmapper = rmapper or {}
return obj
class AliasMapper(OrderedDict):
def add(self, alias, intervals, aliaseds, distances):
assert len(aliaseds) == len(distances)
self[alias] = AliasedGroup(intervals, aliaseds, distances)
def update(self, aliases):
for k, v in aliases.items():
try:
v0 = self[k]
if v0.intervals != v.intervals:
raise ValueError
v0.aliaseds.extend(v.aliaseds)
v0.distances.extend(v.distances)
except KeyError:
self[k] = v
@property
def aliaseds(self):
return flatten(i.aliaseds for i in self.values())
def make_rotations_table(d, v):
"""
All possible rotations of `range(v+1)`.
"""
m = np.array([[j-i if j > i else 0 for j in range(v+1)] for i in range(v+1)])
m = (m - m.T)[::-1, :]
# Shift the table so that the middle rotation is at the top
m = np.roll(m, int(-np.floor(v/2)), axis=0)
# Turn into a more compact representation as a list of Intervals
m = [Interval(d, min(i), max(i)) for i in m]
return m
def cit(ispace0, ispace1):
"""
The Common IterationIntervals of two IterationSpaces.
"""
found = []
for it0, it1 in zip(ispace0.itintervals, ispace1.itintervals):
if it0 == it1:
found.append(it0)
else:
break
return tuple(found)
def maybe_coeff_key(grid, expr):
"""
True if `expr` could be the coefficient of an FD derivative, False otherwise.
"""
if expr.is_Number:
return True
indexeds = [i for i in expr.free_symbols if i.is_Indexed]
return any(not set(grid.dimensions) <= set(i.function.dimensions) for i in indexeds)
def wset(exprs):
"""
Extract the working set out of a set of equations.
"""
return {i.function for i in flatten([e.free_symbols for e in as_tuple(exprs)])
if i.function.is_AbstractFunction}
def potential_max_deriv_order(exprs):
"""
The maximum FD derivative order in a list of expressions.
"""
# NOTE: e might propagate the Derivative(...) information down from the
# symbolic language, but users may do crazy things and write their own custom
# expansions "by hand" (i.e., not resorting to Derivative(...)), hence instead
# of looking for Derivative(...) we use the following heuristic:
# add(mul, mul, ...) -> stems from first order derivative
# add(mul(add(mul, mul, ...), ...), ...) -> stems from second order derivative
# ...
nadds = lambda e: (int(e.is_Add) +
max([nadds(a) for a in e.args], default=0) if not q_leaf(e) else 0)
return max([nadds(e) for e in exprs], default=0)
def search_potential_deriv(expr, n, c=0):
"""
Retrieve the expressions at depth `n` that potentially stem from FD derivatives.
"""
assert n >= c >= 0
if q_leaf(expr) or expr.is_Pow:
return []
elif expr.is_Mul:
if c == n:
return [expr]
else:
return flatten([search_potential_deriv(a, n, c+1) for a in expr.args])
else:
return flatten([search_potential_deriv(a, n, c) for a in expr.args])
|
def translated(self, other):
"""
True if ``self`` is translated w.r.t. ``other``, False otherwise.
Examples
--------
Two candidates are translated if their bases are the same and
their offsets are pairwise translated.
c := A[i,j] op A[i,j+1] -> Toffsets = {i: [0,0], j: [0,1]}
u := A[i+1,j] op A[i+1,j+1] -> Toffsets = {i: [1,1], j: [0,1]}
Then `c` is translated w.r.t. `u` with distance `{i: 1, j: 0}`
"""
if len(self.Toffsets) != len(other.Toffsets):
return False
if len(self.bases) != len(other.bases):
return False
# Check the bases
if any(b0 != b1 for b0, b1 in zip(self.bases, other.bases)):
return False
# Check the offsets
for (d0, o0), (d1, o1) in zip(self.Toffsets, other.Toffsets):
if d0 is not d1:
return False
distance = set(o0 - o1)
if len(distance) != 1:
return False
return True
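# Illustrative sketch of the docstring's example: the translation test on toy
# per-dimension offset tuples (stand-ins for Toffsets), with equal bases:
def _demo_translated():
    c = {'i': (0, 0), 'j': (0, 1)}  # A[i,j] op A[i,j+1]
    u = {'i': (1, 1), 'j': (0, 1)}  # A[i+1,j] op A[i+1,j+1]
    return all(len({o0 - o1 for o0, o1 in zip(c[d], u[d])}) == 1
               for d in c)  # True; distance {i: 1, j: 0} in the docstring's convention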
| 895
| 927
|
from collections import OrderedDict, defaultdict, namedtuple
from functools import partial
from itertools import groupby
from cached_property import cached_property
import numpy as np
from devito.ir import (SEQUENTIAL, PARALLEL, PARALLEL_IF_PVT, ROUNDABLE, DataSpace,
Forward, IterationInstance, IterationSpace, Interval,
IntervalGroup, LabeledVector, Context, detect_accesses,
build_intervals, normalize_properties)
from devito.passes.clusters.utils import timed_pass
from devito.symbolics import (Uxmapper, compare_ops, estimate_cost, q_constant,
q_leaf, retrieve_indexed, search, uxreplace)
from devito.tools import as_tuple, flatten, split
from devito.types import (Array, TempFunction, Eq, Symbol, ModuloDimension,
CustomDimension, IncrDimension)
__all__ = ['cire']
@timed_pass(name='cire')
def cire(clusters, mode, sregistry, options, platform):
"""
Cross-iteration redundancies elimination.
Parameters
----------
clusters : list of Cluster
Input Clusters, subject of the optimization pass.
mode : str
The transformation mode. Accepted: ['invariants', 'sops'].
* 'invariants' is for sub-expressions that are invariant w.r.t. one or
more Dimensions.
* 'sops' stands for sums-of-products, that is redundancies are searched
across all expressions in sum-of-product form.
sregistry : SymbolRegistry
The symbol registry, to create unique temporary names.
options : dict
The optimization options.
Accepted: ['min-storage', 'cire-maxpar', 'cire-rotate', 'cire-maxalias'].
* 'min-storage': if True, the pass will try to minimize the amount of
storage introduced for the tensor temporaries. This might also reduce
the operation count. On the other hand, this might affect fusion and
therefore data locality. Defaults to False (legacy).
* 'cire-maxpar': if True, privilege parallelism over working set size,
that is the pass will try to create as many parallel loops as possible,
even though this will require more space (Dimensions) for the temporaries.
Defaults to False.
* 'cire-rotate': if True, the pass will use modulo indexing for the
outermost Dimension iterated over by the temporaries. This will sacrifice
a parallel loop for a reduced working set size. Defaults to False (legacy).
* 'cire-maxalias': if True, capture the largest redundancies. This will
minimize the flop count while maximizing the number of tensor temporaries,
thus increasing the working set size.
platform : Platform
The underlying platform. Used to optimize the shape of the introduced
tensor symbols.
Examples
--------
1) 'invariants'. Here's an expensive expression invariant w.r.t. `t`
t0 = (cos(a[x,y,z])*sin(b[x,y,z]))*c[t,x,y,z]
which after CIRE becomes
t1[x,y,z] = cos(a[x,y,z])*sin(b[x,y,z])
t0 = t1[x,y,z]*c[t,x,y,z]
2) 'sops'. Below we see two expressions in sum-of-product form (in this
case, the sum degenerates to a single product).
t0 = 2.0*a[x,y,z]*b[x,y,z]
t1 = 3.0*a[x,y,z+1]*b[x,y,z+1]
CIRE detects that these two expressions are actually redundant and rewrites
them as:
t2[x,y,z] = a[x,y,z]*b[x,y,z]
t0 = 2.0*t2[x,y,z]
t1 = 3.0*t2[x,y,z+1]
"""
if mode == 'invariants':
space = ('inv-basic', 'inv-compound')
elif mode in ('sops',):
space = (mode,)
else:
assert False, "Unknown CIRE mode `%s`" % mode
processed = []
for c in clusters:
# We don't care about sparse Clusters. Their computational cost is
# negligible and processing all of them would only increase compilation
# time and potentially make the generated code more chaotic
if not c.is_dense:
processed.append(c)
continue
# Some of the CIRE transformers need to look inside all scopes
# surrounding `c` to perform data dependencies analysis
context = Context(c).process(clusters)
# Applying CIRE may change `c` as well as creating one or more new Clusters
transformed = _cire(c, context, space, sregistry, options, platform)
processed.extend(transformed)
return processed
def _cire(cluster, context, space, sregistry, options, platform):
# Construct the space of variants
variants = [modes[mode](sregistry, options).make_schedule(cluster, context)
for mode in space]
if not any(i.schedule for i in variants):
return [cluster]
# Pick the variant with the highest score, that is the variant with the best
# trade-off between operation count reduction and working set size increase
schedule, exprs = pick_best(variants)
# Schedule -> [Clusters]
schedule = optimize_schedule(cluster, schedule, platform, sregistry, options)
clusters, subs = lower_schedule(cluster, schedule, sregistry, options)
clusters.append(rebuild(cluster, exprs, subs, schedule))
return clusters
class Cire(object):
"""
Base class for CIRE transformers.
"""
optname = None
mode = None
def __init__(self, sregistry, options):
self.sregistry = sregistry
self._opt_minstorage = options['min-storage']
self._opt_mincost = options['cire-mincost'][self.optname]
self._opt_maxpar = options['cire-maxpar']
self._opt_maxalias = options['cire-maxalias']
def make_schedule(self, cluster, context):
# Capture aliases within `exprs`
aliases = AliasMapper()
score = 0
exprs = cluster.exprs
ispace = cluster.ispace
for n in range(self._nrepeats(cluster)):
# Extract potentially aliasing expressions
mapper = self._extract(exprs, context, n)
# Search aliasing expressions
found = collect(mapper.extracted, ispace, self._opt_minstorage)
# Choose the aliasing expressions with a good flops/memory trade-off
exprs, chosen, pscore = choose(found, exprs, mapper, self._selector)
aliases.update(chosen)
score += pscore
# AliasMapper -> Schedule
schedule = lower_aliases(cluster, aliases, self._in_writeto, self._opt_maxpar)
# The actual score is a 2-tuple <flop-reduction-score, working-set-score>
score = (score, len(aliases))
return SpacePoint(schedule, exprs, score)
def _make_symbol(self):
return Symbol(name=self.sregistry.make_name('dummy'))
def _nrepeats(self, cluster):
raise NotImplementedError
def _extract(self, exprs, context, n):
raise NotImplementedError
def _in_writeto(self, dim, cluster):
raise NotImplementedError
def _selector(self, e, naliases):
raise NotImplementedError
class CireInvariants(Cire):
optname = 'invariants'
def _nrepeats(self, cluster):
return 1
def _rule(self, e):
return (e.is_Function or
(e.is_Pow and e.exp.is_Number and e.exp < 1))
def _extract(self, exprs, context, n):
mapper = Uxmapper()
for prefix, clusters in context.items():
if not prefix:
continue
exclude = set().union(*[c.scope.writes for c in clusters])
exclude.add(prefix[-1].dim)
for e in exprs:
for i in search(e, self._rule, 'all', 'bfs_first_hit'):
if {a.function for a in i.free_symbols} & exclude:
continue
mapper.add(i, self._make_symbol)
return mapper
def _in_writeto(self, dim, cluster):
return PARALLEL in cluster.properties[dim]
def _selector(self, e, naliases):
if all(i.function.is_Symbol for i in e.free_symbols):
# E.g., `dt**(-2)`
mincost = self._opt_mincost['scalar']
else:
mincost = self._opt_mincost['tensor']
return estimate_cost(e, True)*naliases // mincost
class CireInvariantsBasic(CireInvariants):
mode = 'inv-basic'
class CireInvariantsCompound(CireInvariants):
mode = 'inv-compound'
def _extract(self, exprs, context, n):
extracted = super()._extract(exprs, context, n).extracted
rule = lambda e: any(a in extracted for a in e.args)
mapper = Uxmapper()
for e in exprs:
for i in search(e, rule, 'all', 'dfs'):
if not i.is_commutative:
continue
key = lambda a: a in extracted
terms, others = split(i.args, key)
mapper.add(i, self._make_symbol, terms)
return mapper
class CireSOPS(Cire):
optname = 'sops'
mode = 'sops'
def _nrepeats(self, cluster):
# The `nrepeats` is calculated such that we analyze all potential derivatives
# in `cluster`
return potential_max_deriv_order(cluster.exprs)
def _extract(self, exprs, context, n):
# Forbid CIRE involving Dimension-independent dependencies, e.g.:
# r0 = ...
# u[x, y] = ... r0*a[x, y] ...
# NOTE: if one uses the DSL in a conventional way and sticks to the default
# compilation pipelines where CSE always happens after CIRE, then `exclude`
# will always be empty
exclude = {i.source.indexed for i in context[None].scope.d_flow.independent()}
mapper = Uxmapper()
for e in exprs:
for i in search_potential_deriv(e, n):
if i.free_symbols & exclude:
continue
key = lambda a: a.is_Add
terms, others = split(i.args, key)
if self._opt_maxalias:
# Treat `e` as an FD expression and pull out the derivative
# coefficient from `i`
# Note: typically derivative coefficients are numbers, but
# sometimes they could be provided in symbolic form through an
# arbitrary Function. In the latter case, we rely on the
# heuristic that such Functions basically never span the whole
# grid, but rather a single Grid dimension (e.g., `c[z, n]` for a
# stencil of diameter `n` along `z`)
if e.grid is not None and terms:
key = partial(maybe_coeff_key, e.grid)
others, more_terms = split(others, key)
terms += more_terms
mapper.add(i, self._make_symbol, terms)
return mapper
def _in_writeto(self, dim, cluster):
return self._opt_maxpar and PARALLEL in cluster.properties[dim]
def _selector(self, e, naliases):
if naliases <= 1:
return 0
else:
return estimate_cost(e, True)*naliases // self._opt_mincost
modes = {
CireInvariantsBasic.mode: CireInvariantsBasic,
CireInvariantsCompound.mode: CireInvariantsCompound,
CireSOPS.mode: CireSOPS
}
def collect(extracted, ispace, min_storage):
"""
Find groups of aliasing expressions.
We shall introduce the following (loose) terminology:
* A ``terminal`` is the leaf of a mathematical operation. Terminals
can be numbers (n), literals (l), or Indexeds (I).
* ``R`` is the relaxation operator := ``R(n) = n``, ``R(l) = l``,
``R(I) = J``, where ``J`` has the same base as ``I`` but with all
offsets stripped away. For example, ``R(a[i+2,j-1]) = a[i,j]``.
* A ``relaxed expression`` is an expression in which all of the
terminals are relaxed.
Now we define the concept of aliasing. We say that an expression A
aliases an expression B if:
* ``R(A) == R(B)``
* all pairwise Indexeds in A and B access memory locations at a
fixed constant distance along each Dimension.
For example, consider the following expressions:
* a[i+1] + b[i+1]
* a[i+1] + b[j+1]
* a[i] + c[i]
* a[i+2] - b[i+2]
* a[i+2] + b[i]
* a[i-1] + b[i-1]
Out of the expressions above, the following alias to `a[i] + b[i]`:
* a[i+1] + b[i+1] : same operands and operations, distance along i: 1
* a[i-1] + b[i-1] : same operands and operations, distance along i: -1
Whereas the following do not:
* a[i+1] + b[j+1] : because at least one index differs
* a[i] + c[i] : because at least one of the operands differs
* a[i+2] - b[i+2] : because at least one operation differs
* a[i+2] + b[i] : because the distances along ``i`` differ (+2 and +0)
"""
# Find the potential aliases
found = []
for expr in extracted:
assert not expr.is_Equality
indexeds = retrieve_indexed(expr)
bases = []
offsets = []
for i in indexeds:
ii = IterationInstance(i)
if ii.is_irregular:
break
base = []
offset = []
for e, ai in zip(ii, ii.aindices):
if q_constant(e):
base.append(e)
else:
base.append(ai)
offset.append((ai, e - ai))
bases.append(tuple(base))
offsets.append(LabeledVector(offset))
if not indexeds or len(bases) == len(indexeds):
found.append(Candidate(expr, ispace, indexeds, bases, offsets))
# Create groups of aliasing expressions
mapper = OrderedDict()
unseen = list(found)
while unseen:
c = unseen.pop(0)
group = [c]
for u in list(unseen):
# Is the arithmetic structure of `c` and `u` equivalent ?
if not compare_ops(c.expr, u.expr):
continue
# Is `c` translated w.r.t. `u` ?
if not c.translated(u):
continue
group.append(u)
unseen.remove(u)
group = Group(group)
if min_storage:
k = group.dimensions_translated
else:
k = group.dimensions
mapper.setdefault(k, []).append(group)
aliases = AliasMapper()
queue = list(mapper.values())
while queue:
groups = queue.pop(0)
while groups:
# For each Dimension, determine the Minimum Intervals (MI) spanning
# all of the Groups' diameters
# Example: x's largest_diameter=2 => [x[-2,0], x[-1,1], x[0,2]]
# Note: Groups that cannot evaluate their diameter are dropped
mapper = defaultdict(int)
for g in list(groups):
try:
mapper.update({d: max(mapper[d], v) for d, v in g.diameter.items()})
except ValueError:
groups.remove(g)
intervalss = {d: make_rotations_table(d, v) for d, v in mapper.items()}
# For each Group, find a rotation that is compatible with a given MI
mapper = {}
for d, intervals in intervalss.items():
# Not all groups may access all dimensions
# Example: `d=t` and groups=[Group(...[t, x]...), Group(...[time, x]...)]
impacted = [g for g in groups if d in g.dimensions]
for interval in list(intervals):
found = {g: g.find_rotation_distance(d, interval) for g in impacted}
if all(distance is not None for distance in found.values()):
# `interval` is OK !
mapper[interval] = found
break
if len(mapper) == len(intervalss):
break
# Try again with fewer groups
# Heuristic: first try retaining the larger ones
smallest = len(min(groups, key=len))
fallback = groups
groups, remainder = split(groups, lambda g: len(g) > smallest)
if groups:
queue.append(remainder)
elif len(remainder) > 1:
# No luck with the heuristic, e.g. there are two groups
# and both have the same `len`
queue.append(fallback[1:])
groups = [fallback.pop(0)]
else:
break
for g in groups:
c = g.pivot
distances = defaultdict(int, [(i.dim, v.get(g)) for i, v in mapper.items()])
# Create the basis alias
offsets = [LabeledVector([(l, v[l] + distances[l]) for l in v.labels])
for v in c.offsets]
subs = {i: i.function[[l + v.fromlabel(l, 0) for l in b]]
for i, b, v in zip(c.indexeds, c.bases, offsets)}
alias = uxreplace(c.expr, subs)
# All aliased expressions
aliaseds = [extracted[i.expr] for i in g]
# Distance of each aliased expression from the basis alias
distances = []
for i in g:
distance = [o.distance(v) for o, v in zip(i.offsets, offsets)]
distance = [(d, set(v)) for d, v in LabeledVector.transpose(*distance)]
distances.append(LabeledVector([(d, v.pop()) for d, v in distance]))
aliases.add(alias, list(mapper), aliaseds, distances)
return aliases
def choose(aliases, exprs, mapper, selector):
"""
Analyze the detected aliases and, after applying a cost model to rule out
the aliases with a bad flops/memory trade-off, inject them into the original
expressions.
"""
tot = 0
retained = AliasMapper()
# Pass 1: a set of aliasing expressions is retained only if its cost
# exceeds the mode's threshold
candidates = OrderedDict()
aliaseds = []
others = []
for e, v in aliases.items():
score = selector(e, len(v.aliaseds))
if score > 0:
candidates[e] = score
aliaseds.extend(v.aliaseds)
else:
others.append(e)
# Do not waste time if unnecessary
if not candidates:
return exprs, retained, tot
# Project the candidate aliases into exprs to determine what the new
# working set would be
mapper = {k: v for k, v in mapper.items() if v.free_symbols & set(aliaseds)}
templated = [uxreplace(e, mapper) for e in exprs]
# Pass 2: a set of aliasing expressions is retained only if the tradeoff
# between operation count reduction and working set increase is favorable
owset = wset(others + templated)
for e, v in aliases.items():
try:
score = candidates[e]
except KeyError:
score = 0
if score > 1 or \
(score == 1 and max(len(wset(e)), 1) > len(wset(e) & owset)):
retained[e] = v
tot += score
# Do not waste time if unnecessary
if not retained:
return exprs, retained, tot
# Substitute the chosen aliasing sub-expressions
mapper = {k: v for k, v in mapper.items() if v.free_symbols & set(retained.aliaseds)}
exprs = [uxreplace(e, mapper) for e in exprs]
return exprs, retained, tot
def lower_aliases(cluster, aliases, in_writeto, maxpar):
"""
Create a Schedule from an AliasMapper.
"""
dmapper = {}
processed = []
for alias, v in aliases.items():
imapper = {**{i.dim: i for i in v.intervals},
**{i.dim.parent: i for i in v.intervals if i.dim.is_NonlinearDerived}}
intervals = []
writeto = []
sub_iterators = {}
indicess = [[] for _ in v.distances]
for i in cluster.ispace.intervals:
try:
interval = imapper[i.dim]
except KeyError:
# E.g., `x0_blk0` or (`a[y_m+1]` => `y not in imapper`)
intervals.append(i)
continue
assert i.stamp >= interval.stamp
if not (writeto or interval != interval.zero() or in_writeto(i.dim, cluster)):
# The alias doesn't require a temporary Dimension along i.dim
intervals.append(i)
continue
assert not i.dim.is_NonlinearDerived
# `i.dim` is necessarily part of the write-to region, so
# we have to adjust the Interval's stamp. For example, consider
# `i=x[0,0]<1>` and `interval=x[-4,4]<0>`; here we need to
# use `<1>` as stamp, which is what appears in `cluster`
interval = interval.lift(i.stamp)
# We further bump the interval stamp if we were requested to trade
# fusion for more collapse-parallelism
interval = interval.lift(interval.stamp + int(maxpar))
writeto.append(interval)
intervals.append(interval)
if i.dim.is_Incr:
# Suitable IncrDimensions must be used to avoid OOB accesses.
# E.g., r[xs][ys][z] => both `xs` and `ys` must be initialized such
# that all accesses are within bounds. This requires traversing the
# hierarchy of IncrDimensions to set `xs` (`ys`) in a way that
# consecutive blocks access consecutive regions in `r` (e.g.,
# `xs=x0_blk1-x0_blk0` with `blocklevels=2`; `xs=0` with
# `blocklevels=1`, that is it degenerates in this case)
try:
d = dmapper[i.dim]
except KeyError:
dd = i.dim.parent
assert dd.is_Incr
if dd.parent.is_Incr:
# An IncrDimension in between IncrDimensions
m = i.dim.symbolic_min - i.dim.parent.symbolic_min
else:
m = 0
d = dmapper[i.dim] = IncrDimension("%ss" % i.dim.name, i.dim, m,
dd.symbolic_size, 1, dd.step)
sub_iterators[i.dim] = d
else:
d = i.dim
# Given the iteration `interval`, lower distances to indices
for distance, indices in zip(v.distances, indicess):
indices.append(d - interval.lower + distance[interval.dim])
# The alias write-to space
writeto = IterationSpace(IntervalGroup(writeto), sub_iterators)
# The alias iteration space
intervals = IntervalGroup(intervals, cluster.ispace.relations)
ispace = IterationSpace(intervals, cluster.sub_iterators, cluster.directions)
ispace = ispace.augment(sub_iterators)
processed.append(ScheduledAlias(alias, writeto, ispace, v.aliaseds, indicess))
# The ScheduledAliases must be ordered so as to reuse as many of the
# `cluster`'s IterationIntervals as possible, thus honoring the
# write-to region. Ordering is also fundamental to ensure deterministic
# code generation
processed = sorted(processed, key=lambda i: cit(cluster.ispace, i.ispace))
return Schedule(*processed, dmapper=dmapper)
def optimize_schedule(cluster, schedule, platform, sregistry, options):
"""
Rewrite the schedule for performance optimization.
"""
if options['cire-rotate']:
schedule = _optimize_schedule_rotations(schedule, sregistry)
schedule = _optimize_schedule_padding(cluster, schedule, platform)
return schedule
def _optimize_schedule_rotations(schedule, sregistry):
"""
Transform the schedule such that the tensor temporaries "rotate" along
the outermost Dimension. This trades a parallel Dimension for a smaller
working set size.
"""
# The rotations Dimension is the outermost
ridx = 0
rmapper = defaultdict(list)
processed = []
for k, group in groupby(schedule, key=lambda i: i.writeto):
g = list(group)
candidate = k[ridx]
d = candidate.dim
try:
ds = schedule.dmapper[d]
except KeyError:
# Can't do anything if `d` isn't an IncrDimension over a block
processed.extend(g)
continue
n = candidate.min_size
assert n > 0
iis = candidate.lower
iib = candidate.upper
ii = ModuloDimension('%sii' % d, ds, iis, incr=iib)
cd = CustomDimension(name='%s%s' % (d, d), symbolic_min=ii, symbolic_max=iib,
symbolic_size=n)
dsi = ModuloDimension('%si' % ds, cd, cd + ds - iis, n)
mapper = OrderedDict()
for i in g:
# Update `indicess` to use `xs0`, `xs1`, ...
mds = []
for indices in i.indicess:
v = indices[ridx]
try:
md = mapper[v]
except KeyError:
name = sregistry.make_name(prefix='%sr' % d.name)
md = mapper.setdefault(v, ModuloDimension(name, ds, v, n))
mds.append(md)
indicess = [indices[:ridx] + [md] + indices[ridx + 1:]
for md, indices in zip(mds, i.indicess)]
# Update `writeto` by switching `d` to `dsi`
intervals = k.intervals.switch(d, dsi).zero(dsi)
sub_iterators = dict(k.sub_iterators)
sub_iterators[d] = dsi
writeto = IterationSpace(intervals, sub_iterators)
# Transform `alias` by adding `i`
alias = i.alias.xreplace({d: d + cd})
# Extend `ispace` to iterate over rotations
d1 = writeto[ridx+1].dim # Note: we're by construction in-bounds here
intervals = IntervalGroup(Interval(cd, 0, 0), relations={(d, cd, d1)})
rispace = IterationSpace(intervals, {cd: dsi}, {cd: Forward})
aispace = i.ispace.zero(d)
aispace = aispace.augment({d: mds + [ii]})
ispace = IterationSpace.union(rispace, aispace)
processed.append(ScheduledAlias(alias, writeto, ispace, i.aliaseds, indicess))
# Update the rotations mapper
rmapper[d].extend(list(mapper.values()))
return Schedule(*processed, dmapper=schedule.dmapper, rmapper=rmapper)
def _optimize_schedule_padding(cluster, schedule, platform):
"""
Round up the innermost IterationInterval of the tensor temporaries IterationSpace
to a multiple of the SIMD vector length. This is not always possible though (it
depends on how much halo is safely accessible in all read Functions).
"""
processed = []
for i in schedule:
try:
it = i.ispace.itintervals[-1]
if ROUNDABLE in cluster.properties[it.dim]:
vl = platform.simd_items_per_reg(cluster.dtype)
ispace = i.ispace.add(Interval(it.dim, 0, it.interval.size % vl))
else:
ispace = i.ispace
processed.append(ScheduledAlias(i.alias, i.writeto, ispace, i.aliaseds,
i.indicess))
except (TypeError, KeyError):
processed.append(i)
return Schedule(*processed, dmapper=schedule.dmapper, rmapper=schedule.rmapper)
def lower_schedule(cluster, schedule, sregistry, options):
"""
Turn a Schedule into a sequence of Clusters.
"""
ftemps = options['cire-ftemps']
if ftemps:
make = TempFunction
else:
# Typical case -- the user does *not* "see" the CIRE-created temporaries
make = Array
clusters = []
subs = {}
for alias, writeto, ispace, aliaseds, indicess in schedule:
# Basic info to create the temporary that will hold the alias
name = sregistry.make_name()
dtype = cluster.dtype
if writeto:
# The Dimensions defining the shape of Array
# Note: with SubDimensions, we may have the following situation:
#
# for zi = z_m + zi_ltkn; zi <= z_M - zi_rtkn; ...
# r[zi] = ...
#
# Instead of `r[zi - z_m - zi_ltkn]` we have just `r[zi]`, so we'll need
# as much room as in `zi`'s parent to avoid going OOB
# Aside from producing ugly generated code, the reason we do not simply
# shift the indices is that doing so would prevent future passes from
# transforming the loop bounds (e.g., MPI's comp/comm overlap does that)
dimensions = [d.parent if d.is_Sub else d for d in writeto.itdimensions]
# The halo must be set according to the size of writeto space
halo = [(abs(i.lower), abs(i.upper)) for i in writeto]
# The indices used to write into the Array
indices = []
for i in writeto:
try:
# E.g., `xs`
sub_iterators = writeto.sub_iterators[i.dim]
assert len(sub_iterators) == 1
indices.append(sub_iterators[0])
except KeyError:
# E.g., `z` -- a non-shifted Dimension
indices.append(i.dim - i.lower)
obj = make(name=name, dimensions=dimensions, halo=halo, dtype=dtype)
expression = Eq(obj[indices], alias)
callback = lambda idx: obj[idx]
else:
# Degenerate case: scalar expression
assert writeto.size == 0
obj = Symbol(name=name, dtype=dtype)
expression = Eq(obj, alias)
callback = lambda idx: obj
# Create the substitution rules for the aliasing expressions
subs.update({aliased: callback(indices)
for aliased, indices in zip(aliaseds, indicess)})
# Construct the `alias` DataSpace
accesses = detect_accesses(expression)
parts = {k: IntervalGroup(build_intervals(v)).add(ispace.intervals).relaxed
for k, v in accesses.items() if k}
dspace = DataSpace(cluster.dspace.intervals, parts)
# Drop or weaken parallelism if necessary
properties = dict(cluster.properties)
for d, v in cluster.properties.items():
if any(i.is_Modulo for i in ispace.sub_iterators[d]):
properties[d] = normalize_properties(v, {SEQUENTIAL})
elif d not in writeto.dimensions:
properties[d] = normalize_properties(v, {PARALLEL_IF_PVT})
# Finally, build the `alias` Cluster
clusters.append(cluster.rebuild(exprs=expression, ispace=ispace,
dspace=dspace, properties=properties))
return clusters, subs
def pick_best(variants):
"""
Use the variant score and heuristics to return the variant with the best
trade-off between operation count reduction and working set increase.
"""
best = variants.pop(0)
for i in variants:
best_flop_score, best_ws_score = best.score
if best_flop_score == 0:
best = i
continue
i_flop_score, i_ws_score = i.score
# The current heuristic is fairly basic: the one with smaller working
# set size increase wins, unless there's a massive reduction in operation
# count in the other one
delta = i_ws_score - best_ws_score
if (delta > 0 and i_flop_score / best_flop_score > 100) or \
(delta == 0 and i_flop_score > best_flop_score) or \
(delta < 0 and best_flop_score / i_flop_score <= 100):
best = i
schedule, exprs, _ = best
return schedule, exprs
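# Illustrative sketch (plain Python, not part of the pass): the comparison
# used by `pick_best`, extracted for clarity. A variant's score is a
# `(flop_score, ws_score)` pair; the smaller working-set increase wins,
# unless the other variant reduces the operation count by more than 100x.
def _better_sketch(best, candidate):
    b_flop, b_ws = best
    c_flop, c_ws = candidate
    if b_flop == 0:
        return True
    delta = c_ws - b_ws
    return ((delta > 0 and c_flop / b_flop > 100) or
            (delta == 0 and c_flop > b_flop) or
            (delta < 0 and b_flop / c_flop <= 100))

assert _better_sketch((10, 4), (8, 3))       # smaller working set: wins
assert not _better_sketch((10, 3), (11, 4))  # bigger working set, modest gain
assert _better_sketch((1, 3), (200, 4))      # massive flop reduction: wins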
def rebuild(cluster, exprs, subs, schedule):
"""
Plug the optimized aliases into the input Cluster. This leads to creating
a new Cluster with suitable IterationSpace and DataSpace.
"""
exprs = [uxreplace(e, subs) for e in exprs]
ispace = cluster.ispace.augment(schedule.dmapper)
ispace = ispace.augment(schedule.rmapper)
accesses = detect_accesses(exprs)
parts = {k: IntervalGroup(build_intervals(v)).relaxed
for k, v in accesses.items() if k}
dspace = DataSpace(cluster.dspace.intervals, parts)
return cluster.rebuild(exprs=exprs, ispace=ispace, dspace=dspace)
# Utilities
class Candidate(object):
def __init__(self, expr, ispace, indexeds, bases, offsets):
self.expr = expr
self.shifts = ispace.intervals
self.indexeds = indexeds
self.bases = bases
self.offsets = offsets
def __repr__(self):
return "Candidate(expr=%s)" % self.expr
def translated(self, other):
"""
True if ``self`` is translated w.r.t. ``other``, False otherwise.
Examples
--------
Two candidates are translated if their bases are the same and
their offsets are pairwise translated.
c := A[i,j] op A[i,j+1] -> Toffsets = {i: [0,0], j: [0,1]}
u := A[i+1,j] op A[i+1,j+1] -> Toffsets = {i: [1,1], j: [0,1]}
Then `c` is translated w.r.t. `u` with distance `{i: 1, j: 0}`
"""
if len(self.Toffsets) != len(other.Toffsets):
return False
if len(self.bases) != len(other.bases):
return False
# Check the bases
if any(b0 != b1 for b0, b1 in zip(self.bases, other.bases)):
return False
# Check the offsets
for (d0, o0), (d1, o1) in zip(self.Toffsets, other.Toffsets):
if d0 is not d1:
return False
distance = set(o0 - o1)
if len(distance) != 1:
return False
return True
@cached_property
def Toffsets(self):
return LabeledVector.transpose(*self.offsets)
@cached_property
def dimensions(self):
return frozenset(i for i, _ in self.Toffsets)
class Group(tuple):
"""
A collection of aliasing expressions.
"""
def __repr__(self):
return "Group(%s)" % ", ".join([str(i) for i in self])
def find_rotation_distance(self, d, interval):
"""
The distance from the Group pivot of a rotation along Dimension ``d`` that
can safely iterate over the ``interval``.
"""
assert d is interval.dim
for rotation, distance in self._pivot_legal_rotations[d]:
# Does `rotation` cover the `interval` ?
if rotation.union(interval) != rotation:
continue
# Infer the `rotation`'s min_intervals from the pivot's
min_interval = self._pivot_min_intervals[d].translate(-distance)
# Does the `interval` actually cover the `rotation`'s `min_interval`?
if interval.union(min_interval) == interval:
return distance
return None
@cached_property
def Toffsets(self):
return [LabeledVector.transpose(*i) for i in zip(*[i.offsets for i in self])]
@cached_property
def diameter(self):
"""
The size of the iteration space required to evaluate all aliasing expressions
in this Group, along each Dimension.
"""
ret = defaultdict(int)
for i in self.Toffsets:
for d, v in i:
try:
distance = int(max(v) - min(v))
except TypeError:
# An entry in `v` has symbolic components, e.g. `x_m + 2`
if len(set(v)) == 1:
continue
else:
raise ValueError
ret[d] = max(ret[d], distance)
return ret
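# Worked example: with Toffsets such as [(x, (0, 1)), (y, (0, 0))], the
# diameter is {x: 1, y: 0} -- one extra point along `x` is needed to
# evaluate all the aliasing expressions in the Group.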
@property
def pivot(self):
"""
A deterministically chosen Candidate for this Group.
"""
return self[0]
@property
def dimensions(self):
return self.pivot.dimensions
@property
def dimensions_translated(self):
return frozenset(d for d, v in self.diameter.items() if v > 0)
@cached_property
def _pivot_legal_rotations(self):
"""
All legal rotations along each Dimension for the Group pivot.
"""
ret = {}
for d, (maxd, mini) in self._pivot_legal_shifts.items():
# Rotation size = mini (min-increment) - maxd (max-decrement)
v = mini - maxd
# Build the table of all possible rotations
m = make_rotations_table(d, v)
distances = []
for rotation in m:
# Distance of `rotation` from the pivot
distance = maxd - rotation.lower
assert distance == mini - rotation.upper
distances.append(distance)
ret[d] = list(zip(m, distances))
return ret
@cached_property
def _pivot_min_intervals(self):
"""
The minimum Interval along each Dimension such that by evaluating the
pivot, all Candidates are evaluated too.
"""
c = self.pivot
ret = defaultdict(lambda: [np.inf, -np.inf])
for i in self:
distance = [o.distance(v) for o, v in zip(i.offsets, c.offsets)]
distance = [(d, set(v)) for d, v in LabeledVector.transpose(*distance)]
for d, v in distance:
value = v.pop()
ret[d][0] = min(ret[d][0], value)
ret[d][1] = max(ret[d][1], value)
ret = {d: Interval(d, m, M) for d, (m, M) in ret.items()}
return ret
@cached_property
def _pivot_legal_shifts(self):
"""
The max decrement and min increment along each Dimension such that the
Group pivot does not go OOB.
"""
c = self.pivot
ret = defaultdict(lambda: (-np.inf, np.inf))
for i, ofs in zip(c.indexeds, c.offsets):
f = i.function
for l in ofs.labels:
# `f`'s cumulative halo size along `l`
hsize = sum(f._size_halo[l])
# Any `ofs`'s shift due to non-[0,0] iteration space
lower, upper = c.shifts[l].offsets
try:
# Assume `ofs[d]` is a number (typical case)
maxd = min(0, max(ret[l][0], -ofs[l] - lower))
mini = max(0, min(ret[l][1], hsize - ofs[l] - upper))
ret[l] = (maxd, mini)
except TypeError:
# E.g., `ofs[d] = x_m - x + 5`
ret[l] = (0, 0)
return ret
AliasedGroup = namedtuple('AliasedGroup', 'intervals aliaseds distances')
ScheduledAlias = namedtuple('ScheduledAlias', 'alias writeto ispace aliaseds indicess')
ScheduledAlias.__new__.__defaults__ = (None,) * len(ScheduledAlias._fields)
SpacePoint = namedtuple('SpacePoint', 'schedule exprs score')
class Schedule(tuple):
def __new__(cls, *items, dmapper=None, rmapper=None):
obj = super(Schedule, cls).__new__(cls, items)
obj.dmapper = dmapper or {}
obj.rmapper = rmapper or {}
return obj
class AliasMapper(OrderedDict):
def add(self, alias, intervals, aliaseds, distances):
assert len(aliaseds) == len(distances)
self[alias] = AliasedGroup(intervals, aliaseds, distances)
def update(self, aliases):
for k, v in aliases.items():
try:
v0 = self[k]
if v0.intervals != v.intervals:
raise ValueError
v0.aliaseds.extend(v.aliaseds)
v0.distances.extend(v.distances)
except KeyError:
self[k] = v
@property
def aliaseds(self):
return flatten(i.aliaseds for i in self.values())
def make_rotations_table(d, v):
"""
All possible rotations of `range(v+1)`.
"""
m = np.array([[j-i if j > i else 0 for j in range(v+1)] for i in range(v+1)])
m = (m - m.T)[::-1, :]
# Shift the table so that the middle rotation is at the top
m = np.roll(m, int(-np.floor(v/2)), axis=0)
# Turn into a more compact representation as a list of Intervals
m = [Interval(d, min(i), max(i)) for i in m]
return m
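# Worked example: `make_rotations_table(x, 2)` builds
#
#   base `m`:        after `(m - m.T)[::-1]`:   after the roll (middle first):
#   [[0, 1, 2],      [[-2, -1,  0],             [[-1,  0,  1],
#    [0, 0, 1],       [-1,  0,  1],              [ 0,  1,  2],
#    [0, 0, 0]]       [ 0,  1,  2]]              [-2, -1,  0]]
#
# and returns the rows as Intervals: [x[-1, 1], x[0, 2], x[-2, 0]].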
def cit(ispace0, ispace1):
"""
The Common IterationIntervals of two IterationSpaces.
"""
found = []
for it0, it1 in zip(ispace0.itintervals, ispace1.itintervals):
if it0 == it1:
found.append(it0)
else:
break
return tuple(found)
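# Illustrative sketch (plain tuples instead of IterationSpaces): `cit` is a
# longest-common-prefix over IterationIntervals.
def _common_prefix_sketch(seq0, seq1):
    found = []
    for a, b in zip(seq0, seq1):
        if a != b:
            break
        found.append(a)
    return tuple(found)

assert _common_prefix_sketch(('x', 'y', 'z'), ('x', 'y', 'w')) == ('x', 'y')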
def maybe_coeff_key(grid, expr):
"""
True if `expr` could be the coefficient of an FD derivative, False otherwise.
"""
if expr.is_Number:
return True
indexeds = [i for i in expr.free_symbols if i.is_Indexed]
return any(not set(grid.dimensions) <= set(i.function.dimensions) for i in indexeds)
def wset(exprs):
"""
Extract the working set out of a set of equations.
"""
return {i.function for i in flatten([e.free_symbols for e in as_tuple(exprs)])
if i.function.is_AbstractFunction}
def potential_max_deriv_order(exprs):
"""
The maximum FD derivative order in a list of expressions.
"""
# NOTE: e might propagate the Derivative(...) information down from the
# symbolic language, but users may do crazy things and write their own custom
# expansions "by hand" (i.e., not resorting to Derivative(...)), hence instead
# of looking for Derivative(...) we use the following heuristic:
# add(mul, mul, ...) -> stems from first order derivative
# add(mul(add(mul, mul, ...), ...), ...) -> stems from second order derivative
# ...
nadds = lambda e: (int(e.is_Add) +
max([nadds(a) for a in e.args], default=0) if not q_leaf(e) else 0)
return max([nadds(e) for e in exprs], default=0)
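# Worked example of the `nadds` heuristic (written with plain SymPy symbols;
# assumes a leaf test analogous to `q_leaf`):
#
#   a, b, c = sympy.symbols('a b c')
#   a*b + b*c              # add(mul, mul)                 -> nadds = 1
#   (a*b + b*c)*a + c      # add(mul(add(mul, mul)), ...)  -> nadds = 2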
def search_potential_deriv(expr, n, c=0):
"""
Retrieve the expressions at depth `n` that potentially stem from FD derivatives.
"""
assert n >= c >= 0
if q_leaf(expr) or expr.is_Pow:
return []
elif expr.is_Mul:
if c == n:
return [expr]
else:
return flatten([search_potential_deriv(a, n, c+1) for a in expr.args])
else:
return flatten([search_potential_deriv(a, n, c) for a in expr.args])
|
create_working_dir
|
Creates a directory in `basedir` with a prefix of `dirprefix`.
The directory name will have a random string appended to `dirprefix`.
Returns the path to the working directory.
:rtype : str
:param basedir: str, the directory in which to create the working directory
:param dirprefix: str, prefix to prepend to the working directory
|
#!/usr/bin/env python
import os
import sys
import platform
import tempfile
import urllib2
import shutil
import boto
from boto.exception import BotoClientError
def merge_dicts(a, b):
"""
Merge two dictionaries. If there is a key collision, `b` overrides `a`.
:param a: Dictionary of default settings
:param b: Dictionary of override settings
:rtype : dict
"""
try:
a.update(b)
except Exception as exc:
#TODO: Update `except` logic
raise SystemError('Failed to merge dictionaries. Dictionary A:\n\n'
'{0}\n\n'
'Dictionary B:\n\n'
'{1}\n\n'
'Exception: {2}'
.format(a, b, exc))
return a
def get_scripts_to_execute(system, workingdir, **scriptparams):
"""
Returns a tuple of dicts. Each dict has two keys: 'ScriptSource' and 'Parameters'.
'ScriptSource' is the URL of the script to be executed. Only http/s sources are currently supported.
'Parameters' is a dict of parameters to pass to the script.
Use `merge_dicts({yourdict}, scriptparams)` to merge command line parameters with a set of default parameters.
:param system: str, the system type as returned from `platform.system`
:param workingdir: str, the working directory where content should be saved
:param scriptparams: dict, parameters passed to the master script which should be relayed to the content scripts
:rtype : tuple
"""
if 'Linux' in system:
scriptstoexecute = (
{
'ScriptSource': "https://systemprep.s3.amazonaws.com/ContentScripts/systemprep-linuxyumrepoinstall.py",
'Parameters': merge_dicts({
'yumrepomap': [
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-amzn.repo',
'dist': 'amazon',
'epel_version': '6',
},
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el6.repo',
'dist': 'redhat',
'epel_version': '6',
},
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el6.repo',
'dist': 'centos',
'epel_version': '6',
},
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el7.repo',
'dist': 'redhat',
'epel_version': '7',
},
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el7.repo',
'dist': 'centos',
'epel_version': '7',
},
],
}, scriptparams)
},
{
'ScriptSource': "https://systemprep.s3.amazonaws.com/ContentScripts/SystemPrep-LinuxSaltInstall.py",
'Parameters': merge_dicts({
'saltinstallmethod': 'yum',
'saltcontentsource': "https://systemprep-content.s3.amazonaws.com/linux/salt/salt-content.zip",
'formulastoinclude': [
"https://salt-formulas.s3.amazonaws.com/systemprep-formula-master.zip",
"https://salt-formulas.s3.amazonaws.com/ash-linux-formula-master.zip",
"https://salt-formulas.s3.amazonaws.com/join-domain-formula-master.zip",
"https://salt-formulas.s3.amazonaws.com/scc-formula-master.zip",
"https://s3.amazonaws.com/salt-formulas/name-computer-formula-master.zip",
],
'formulaterminationstrings': [
"-master",
"-latest",
],
'saltstates': 'Highstate',
'entenv': 'False',
'salt_results_log': '/var/log/saltcall.results.log',
'salt_debug_log': '/var/log/saltcall.debug.log',
'sourceiss3bucket': 'True',
}, scriptparams)
},
)
elif 'Windows' in system:
scriptstoexecute = (
{
'ScriptSource': "https://systemprep.s3.amazonaws.com/SystemContent/Windows/Salt/SystemPrep-WindowsSaltInstall.ps1",
'Parameters': merge_dicts({
'saltworkingdir': '{0}\\SystemContent\\Windows\\Salt'.format(workingdir),
'saltcontentsource': "https://systemprep.s3.amazonaws.com/SystemContent/Windows/Salt/salt-content.zip",
'formulastoinclude': [
"https://salt-formulas.s3.amazonaws.com/systemprep-formula-master.zip",
"https://salt-formulas.s3.amazonaws.com/ash-windows-formula-master.zip",
],
'formulaterminationstrings': [
"-latest",
],
'ashrole': "MemberServer",
'entenv': 'False',
'saltstates': "Highstate",
}, scriptparams)
},
)
else:
#TODO: Update `except` logic
raise SystemError('System, {0}, is not recognized.'.format(system))
return scriptstoexecute
# MASKED: create_working_dir function (lines 131-148)
def get_system_params(system):
"""
Returns a dictionary of OS platform-specific parameters.
:param system: str, the system type as returned by `platform.system`
:rtype : dict
"""
a = {}
workingdirprefix = 'systemprep-'
if 'Linux' in system:
tempdir = '/usr/tmp/'
a['pathseparator'] = '/'
a['readyfile'] = '/var/run/system-is-ready'
a['restart'] = 'shutdown -r +1 &'
elif 'Windows' in system:
#TODO: Add and test the Windows parameters/functionality
systemroot = os.environ['SYSTEMROOT']
systemdrive = os.environ['SYSTEMDRIVE']
tempdir = os.environ['TEMP']
a['pathseparator'] = '\\'
a['readyfile'] = '{0}\system-is-ready'.format(systemdrive)
a['restart'] = '{0}\system32\shutdown.exe/r /t 30 /d p:2:4 /c "SystemPrep complete. Rebooting computer."'.format(systemroot)
else:
#TODO: Update `except` logic
raise SystemError('System, {0}, is not recognized.'.format(system))
a['workingdir'] = create_working_dir(tempdir, workingdirprefix)
return a
def download_file(url, filename, sourceiss3bucket=None):
"""
Download the file from `url` and save it locally under `filename`.
:rtype : bool
:param url:
:param filename:
:param sourceiss3bucket:
"""
conn = None
if sourceiss3bucket:
bucket_name = url.split('/')[3]
key_name = '/'.join(url.split('/')[4:])
try:
conn = boto.connect_s3()
bucket = conn.get_bucket(bucket_name)
key = bucket.get_key(key_name)
key.get_contents_to_filename(filename=filename)
except (NameError, BotoClientError):
try:
bucket_name = url.split('/')[2].split('.')[0]
key_name = '/'.join(url.split('/')[3:])
bucket = conn.get_bucket(bucket_name)
key = bucket.get_key(key_name)
key.get_contents_to_filename(filename=filename)
except Exception as exc:
raise SystemError('Unable to download file from S3 bucket.\n'
'url = {0}\n'
'bucket = {1}\n'
'key = {2}\n'
'file = {3}\n'
'Exception: {4}'
.format(url, bucket_name, key_name,
filename, exc))
except Exception as exc:
raise SystemError('Unable to download file from S3 bucket.\n'
'url = {0}\n'
'bucket = {1}\n'
'key = {2}\n'
'file = {3}\n'
'Exception: {4}'
.format(url, bucket_name, key_name,
filename, exc))
print('Downloaded file from S3 bucket -- \n'
' url = {0}\n'
' filename = {1}'.format(url, filename))
else:
try:
response = urllib2.urlopen(url)
with open(filename, 'wb') as outfile:
shutil.copyfileobj(response, outfile)
except Exception as exc:
#TODO: Update `except` logic
raise SystemError('Unable to download file from web server.\n'
'url = {0}\n'
'filename = {1}\n'
'Exception: {2}'
.format(url, filename, exc))
print('Downloaded file from web server -- \n'
' url = {0}\n'
' filename = {1}'.format(url, filename))
return True
def cleanup(workingdir):
"""
Removes temporary files loaded to the system.
:param workingdir: str, Path to the working directory
:return: bool
"""
print('+-' * 40)
print('Cleanup Time...')
try:
shutil.rmtree(workingdir)
except Exception as exc:
#TODO: Update `except` logic
raise SystemError('Cleanup Failed!\n'
'Exception: {0}'.format(exc))
print('Removed temporary data in working directory -- ' + workingdir)
print('Exiting cleanup routine...')
print('-+' * 40)
return True
def main(noreboot='false', **kwargs):
"""
Master script that calls content scripts to be deployed when provisioning systems
"""
# NOTE: Using __file__ may freeze if trying to build an executable, e.g. via py2exe.
# NOTE: Using __file__ does not work if running from IDLE/interpreter.
# NOTE: __file__ may return relative path as opposed to an absolute path, so include os.path.abspath.
scriptname = ''
if '__file__' in dir():
scriptname = os.path.abspath(__file__)
else:
scriptname = os.path.abspath(sys.argv[0])
# Check special parameter types
noreboot = 'true' == noreboot.lower()
sourceiss3bucket = 'true' == kwargs.get('sourceiss3bucket', 'false').lower()
print('+' * 80)
print('Entering script -- {0}'.format(scriptname))
print('Printing parameters --')
print(' noreboot = {0}'.format(noreboot))
for key, value in kwargs.items():
print(' {0} = {1}'.format(key, value))
system = platform.system()
systemparams = get_system_params(system)
scriptstoexecute = get_scripts_to_execute(system, systemparams['workingdir'], **kwargs)
#Loop through each 'script' in scriptstoexecute
for script in scriptstoexecute:
url = script['ScriptSource']
filename = url.split('/')[-1]
fullfilepath = systemparams['workingdir'] + systemparams['pathseparator'] + filename
#Download each script, script['ScriptSource']
download_file(url, fullfilepath, sourceiss3bucket)
#Execute each script, passing it the parameters in script['Parameters']
#TODO: figure out if there's a better way to call and execute the script
print('Running script -- ' + script['ScriptSource'])
print('Sending parameters --')
for key, value in script['Parameters'].items():
print(' {0} = {1}'.format(key, value))
paramstring = ' '.join("%s='%s'" % (key, val) for (key, val) in script['Parameters'].items())
fullcommand = 'python {0} {1}'.format(fullfilepath, paramstring)
result = os.system(fullcommand)
if result != 0:
message = 'Encountered an unrecoverable error executing a ' \
'content script. Exiting with failure.\n' \
'Command executed: {0}' \
.format(fullcommand)
raise SystemError(message)
cleanup(systemparams['workingdir'])
if noreboot:
print('Detected `noreboot` switch. System will not be rebooted.')
else:
print('Reboot scheduled. System will reboot after the script exits.')
os.system(systemparams['restart'])
print('{0} complete!'.format(scriptname))
print('-' * 80)
if "__main__" == __name__:
# Convert command line parameters of the form `param=value` to a dictionary.
# NOTE: Keys are stored in lowercase format.
kwargs = {}
for x in sys.argv[1:]:
if '=' in x:
[key, value] = x.split('=', 1)
kwargs[key.lower()] = value
else:
message = 'Encountered a parameter that does not have = in it.'
raise SystemError(message)
# NOTE: We are unpacking kwargs to obtain the noreboot parameter for the main
# definition. The rest are packed back into kwargs.
# TODO: This is not necessary and consumes a minor overhead. I would just pass along the dictionary.
# However, since we will be moving to using argparse, this will become obsolete.
main(**kwargs)
|
def create_working_dir(basedir, dirprefix):
"""
Creates a directory in `basedir` with a prefix of `dirprefix`.
The directory name will have a random string appended to `dirprefix`.
Returns the path to the working directory.
:rtype : str
:param basedir: str, the directory in which to create the working directory
:param dirprefix: str, prefix to prepend to the working directory
"""
workingdir = None
try:
workingdir = tempfile.mkdtemp(prefix=dirprefix, dir=basedir)
except Exception as exc:
#TODO: Update `except` logic
raise SystemError('Could not create workingdir in {0}.\n'
'Exception: {1}'.format(basedir, exc))
return workingdir
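# Usage sketch (the random suffix will differ from run to run):
#
#   workingdir = create_working_dir('/usr/tmp/', 'systemprep-')
#   # -> e.g. '/usr/tmp/systemprep-Xu3f2a'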
| 131
| 148
|
#!/usr/bin/env python
import os
import sys
import platform
import tempfile
import urllib2
import shutil
import boto
from boto.exception import BotoClientError
def merge_dicts(a, b):
"""
Merge two dictionaries. If there is a key collision, `b` overrides `a`.
:param a: Dictionary of default settings
:param b: Dictionary of override settings
:rtype : dict
"""
try:
a.update(b)
except Exception as exc:
#TODO: Update `except` logic
raise SystemError('Failed to merge dictionaries. Dictionary A:\n\n'
'{0}\n\n'
'Dictionary B:\n\n'
'{1}\n\n'
'Exception: {2}'
.format(a, b, exc))
return a
def get_scripts_to_execute(system, workingdir, **scriptparams):
"""
Returns a tuple of dicts. Each dict has two keys: 'ScriptSource' and 'Parameters'.
'ScriptSource' is the URL of the script to be executed. Only http/s sources are currently supported.
'Parameters' is a dict of parameters to pass to the script.
Use `merge_dicts({yourdict}, scriptparams)` to merge command line parameters with a set of default parameters.
:param system: str, the system type as returned from `platform.system`
:param workingdir: str, the working directory where content should be saved
:param scriptparams: dict, parameters passed to the master script which should be relayed to the content scripts
:rtype : tuple
"""
if 'Linux' in system:
scriptstoexecute = (
{
'ScriptSource': "https://systemprep.s3.amazonaws.com/ContentScripts/systemprep-linuxyumrepoinstall.py",
'Parameters': merge_dicts({
'yumrepomap': [
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-amzn.repo',
'dist': 'amazon',
'epel_version': '6',
},
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el6.repo',
'dist': 'redhat',
'epel_version': '6',
},
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el6.repo',
'dist': 'centos',
'epel_version': '6',
},
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el7.repo',
'dist': 'redhat',
'epel_version': '7',
},
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el7.repo',
'dist': 'centos',
'epel_version': '7',
},
],
}, scriptparams)
},
{
'ScriptSource': "https://systemprep.s3.amazonaws.com/ContentScripts/SystemPrep-LinuxSaltInstall.py",
'Parameters': merge_dicts({
'saltinstallmethod': 'yum',
'saltcontentsource': "https://systemprep-content.s3.amazonaws.com/linux/salt/salt-content.zip",
'formulastoinclude': [
"https://salt-formulas.s3.amazonaws.com/systemprep-formula-master.zip",
"https://salt-formulas.s3.amazonaws.com/ash-linux-formula-master.zip",
"https://salt-formulas.s3.amazonaws.com/join-domain-formula-master.zip",
"https://salt-formulas.s3.amazonaws.com/scc-formula-master.zip",
"https://s3.amazonaws.com/salt-formulas/name-computer-formula-master.zip",
],
'formulaterminationstrings': [
"-master",
"-latest",
],
'saltstates': 'Highstate',
'entenv': 'False',
'salt_results_log': '/var/log/saltcall.results.log',
'salt_debug_log': '/var/log/saltcall.debug.log',
'sourceiss3bucket': 'True',
}, scriptparams)
},
)
elif 'Windows' in system:
scriptstoexecute = (
{
'ScriptSource': "https://systemprep.s3.amazonaws.com/SystemContent/Windows/Salt/SystemPrep-WindowsSaltInstall.ps1",
'Parameters': merge_dicts({
'saltworkingdir': '{0}\\SystemContent\\Windows\\Salt'.format(workingdir),
'saltcontentsource': "https://systemprep.s3.amazonaws.com/SystemContent/Windows/Salt/salt-content.zip",
'formulastoinclude': [
"https://salt-formulas.s3.amazonaws.com/systemprep-formula-master.zip",
"https://salt-formulas.s3.amazonaws.com/ash-windows-formula-master.zip",
],
'formulaterminationstrings': [
"-latest",
],
'ashrole': "MemberServer",
'entenv': 'False',
'saltstates': "Highstate",
}, scriptparams)
},
)
else:
#TODO: Update `except` logic
raise SystemError('System, {0}, is not recognized.'.format(system))
return scriptstoexecute
def create_working_dir(basedir, dirprefix):
"""
Creates a directory in `basedir` with a prefix of `dirprefix`.
The directory name will have a random string appended to `dirprefix`.
Returns the path to the working directory.
:rtype : str
:param basedir: str, the directory in which to create the working directory
:param dirprefix: str, prefix to prepend to the working directory
"""
workingdir = None
try:
workingdir = tempfile.mkdtemp(prefix=dirprefix, dir=basedir)
except Exception as exc:
#TODO: Update `except` logic
raise SystemError('Could not create workingdir in {0}.\n'
'Exception: {1}'.format(basedir, exc))
return workingdir
def get_system_params(system):
"""
Returns a dictionary of OS platform-specific parameters.
:param system: str, the system type as returned by `platform.system`
:rtype : dict
"""
a = {}
workingdirprefix = 'systemprep-'
if 'Linux' in system:
tempdir = '/usr/tmp/'
a['pathseparator'] = '/'
a['readyfile'] = '/var/run/system-is-ready'
a['restart'] = 'shutdown -r +1 &'
elif 'Windows' in system:
#TODO: Add and test the Windows parameters/functionality
systemroot = os.environ['SYSTEMROOT']
systemdrive = os.environ['SYSTEMDRIVE']
tempdir = os.environ['TEMP']
a['pathseparator'] = '\\'
a['readyfile'] = '{0}\system-is-ready'.format(systemdrive)
a['restart'] = '{0}\system32\shutdown.exe/r /t 30 /d p:2:4 /c "SystemPrep complete. Rebooting computer."'.format(systemroot)
else:
#TODO: Update `except` logic
raise SystemError('System, {0}, is not recognized.'.format(system))
a['workingdir'] = create_working_dir(tempdir, workingdirprefix)
return a
def download_file(url, filename, sourceiss3bucket=None):
"""
Download the file from `url` and save it locally under `filename`.
:rtype : bool
:param url:
:param filename:
:param sourceiss3bucket:
"""
conn = None
if sourceiss3bucket:
bucket_name = url.split('/')[3]
key_name = '/'.join(url.split('/')[4:])
try:
conn = boto.connect_s3()
bucket = conn.get_bucket(bucket_name)
key = bucket.get_key(key_name)
key.get_contents_to_filename(filename=filename)
except (NameError, BotoClientError):
try:
bucket_name = url.split('/')[2].split('.')[0]
key_name = '/'.join(url.split('/')[3:])
bucket = conn.get_bucket(bucket_name)
key = bucket.get_key(key_name)
key.get_contents_to_filename(filename=filename)
except Exception as exc:
raise SystemError('Unable to download file from S3 bucket.\n'
'url = {0}\n'
'bucket = {1}\n'
'key = {2}\n'
'file = {3}\n'
'Exception: {4}'
.format(url, bucket_name, key_name,
filename, exc))
except Exception as exc:
raise SystemError('Unable to download file from S3 bucket.\n'
'url = {0}\n'
'bucket = {1}\n'
'key = {2}\n'
'file = {3}\n'
'Exception: {4}'
.format(url, bucket_name, key_name,
filename, exc))
print('Downloaded file from S3 bucket -- \n'
' url = {0}\n'
' filename = {1}'.format(url, filename))
else:
try:
response = urllib2.urlopen(url)
with open(filename, 'wb') as outfile:
shutil.copyfileobj(response, outfile)
except Exception as exc:
#TODO: Update `except` logic
raise SystemError('Unable to download file from web server.\n'
'url = {0}\n'
'filename = {1}\n'
'Exception: {2}'
.format(url, filename, exc))
print('Downloaded file from web server -- \n'
' url = {0}\n'
' filename = {1}'.format(url, filename))
return True
def cleanup(workingdir):
"""
Removes temporary files loaded to the system.
:param workingdir: str, Path to the working directory
:return: bool
"""
print('+-' * 40)
print('Cleanup Time...')
try:
shutil.rmtree(workingdir)
except Exception as exc:
#TODO: Update `except` logic
raise SystemError('Cleanup Failed!\n'
'Exception: {0}'.format(exc))
print('Removed temporary data in working directory -- ' + workingdir)
print('Exiting cleanup routine...')
print('-+' * 40)
return True
def main(noreboot='false', **kwargs):
"""
Master script that calls content scripts to be deployed when provisioning systems
"""
# NOTE: Using __file__ may freeze if trying to build an executable, e.g. via py2exe.
# NOTE: Using __file__ does not work if running from IDLE/interpreter.
# NOTE: __file__ may return relative path as opposed to an absolute path, so include os.path.abspath.
scriptname = ''
if '__file__' in dir():
scriptname = os.path.abspath(__file__)
else:
scriptname = os.path.abspath(sys.argv[0])
# Check special parameter types
noreboot = 'true' == noreboot.lower()
sourceiss3bucket = 'true' == kwargs.get('sourceiss3bucket', 'false').lower()
print('+' * 80)
print('Entering script -- {0}'.format(scriptname))
print('Printing parameters --')
print(' noreboot = {0}'.format(noreboot))
for key, value in kwargs.items():
print(' {0} = {1}'.format(key, value))
system = platform.system()
systemparams = get_system_params(system)
scriptstoexecute = get_scripts_to_execute(system, systemparams['workingdir'], **kwargs)
#Loop through each 'script' in scriptstoexecute
for script in scriptstoexecute:
url = script['ScriptSource']
filename = url.split('/')[-1]
fullfilepath = systemparams['workingdir'] + systemparams['pathseparator'] + filename
#Download each script, script['ScriptSource']
download_file(url, fullfilepath, sourceiss3bucket)
#Execute each script, passing it the parameters in script['Parameters']
#TODO: figure out if there's a better way to call and execute the script
print('Running script -- ' + script['ScriptSource'])
print('Sending parameters --')
for key, value in script['Parameters'].items():
print(' {0} = {1}'.format(key, value))
paramstring = ' '.join("%s='%s'" % (key, val) for (key, val) in script['Parameters'].items())
fullcommand = 'python {0} {1}'.format(fullfilepath, paramstring)
result = os.system(fullcommand)
if result != 0:
message = 'Encountered an unrecoverable error executing a ' \
'content script. Exiting with failure.\n' \
'Command executed: {0}' \
.format(fullcommand)
raise SystemError(message)
cleanup(systemparams['workingdir'])
if noreboot:
print('Detected `noreboot` switch. System will not be rebooted.')
else:
print('Reboot scheduled. System will reboot after the script exits.')
os.system(systemparams['restart'])
print('{0} complete!'.format(scriptname))
print('-' * 80)
if "__main__" == __name__:
# Convert command line parameters of the form `param=value` to a dictionary.
# NOTE: Keys are stored in lowercase format.
kwargs = {}
for x in sys.argv[1:]:
if '=' in x:
[key, value] = x.split('=', 1)
kwargs[key.lower()] = value
else:
message = 'Encountered a parameter that does not have = in it.'
raise SystemError(message)
# NOTE: We are unpacking kwargs to obtain the noreboot parameter for the main
# definition. The rest are packed back into kwargs.
# TODO: This is not necessary and consumes a minor overhead. I would just pass along the dictionary.
# However, since we will be moving to using argparse, this will become obsolete.
main(**kwargs)
|
get_system_params
|
Returns a dictionary of OS platform-specific parameters.
:param system: str, the system type as returned by `platform.system`
:rtype : dict
|
#!/usr/bin/env python
import os
import sys
import platform
import tempfile
import urllib2
import shutil
import boto
from boto.exception import BotoClientError
def merge_dicts(a, b):
"""
Merge two dictionaries. If there is a key collision, `b` overrides `a`.
:param a: Dictionary of default settings
:param b: Dictionary of override settings
:rtype : dict
"""
try:
a.update(b)
except Exception as exc:
#TODO: Update `except` logic
raise SystemError('Failed to merge dictionaries. Dictionary A:\n\n'
'{0}\n\n'
'Dictionary B:\n\n'
'{1}\n\n'
'Exception: {2}'
.format(a, b, exc))
return a
def get_scripts_to_execute(system, workingdir, **scriptparams):
"""
Returns a tuple of dicts. Each dict has two keys: 'ScriptSource' and 'Parameters'.
'ScriptSource' is the URL of the script to be executed. Only http/s sources are currently supported.
'Parameters' is a dict of parameters to pass to the script.
Use `merge_dicts({yourdict}, scriptparams)` to merge command line parameters with a set of default parameters.
:param system: str, the system type as returned from `platform.system`
:param workingdir: str, the working directory where content should be saved
:param scriptparams: dict, parameters passed to the master script which should be relayed to the content scripts
:rtype : tuple
"""
if 'Linux' in system:
scriptstoexecute = (
{
'ScriptSource': "https://systemprep.s3.amazonaws.com/ContentScripts/systemprep-linuxyumrepoinstall.py",
'Parameters': merge_dicts({
'yumrepomap': [
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-amzn.repo',
'dist': 'amazon',
'epel_version': '6',
},
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el6.repo',
'dist': 'redhat',
'epel_version': '6',
},
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el6.repo',
'dist': 'centos',
'epel_version': '6',
},
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el7.repo',
'dist': 'redhat',
'epel_version': '7',
},
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el7.repo',
'dist': 'centos',
'epel_version': '7',
},
],
}, scriptparams)
},
{
'ScriptSource': "https://systemprep.s3.amazonaws.com/ContentScripts/SystemPrep-LinuxSaltInstall.py",
'Parameters': merge_dicts({
'saltinstallmethod': 'yum',
'saltcontentsource': "https://systemprep-content.s3.amazonaws.com/linux/salt/salt-content.zip",
'formulastoinclude': [
"https://salt-formulas.s3.amazonaws.com/systemprep-formula-master.zip",
"https://salt-formulas.s3.amazonaws.com/ash-linux-formula-master.zip",
"https://salt-formulas.s3.amazonaws.com/join-domain-formula-master.zip",
"https://salt-formulas.s3.amazonaws.com/scc-formula-master.zip",
"https://s3.amazonaws.com/salt-formulas/name-computer-formula-master.zip",
],
'formulaterminationstrings': [
"-master",
"-latest",
],
'saltstates': 'Highstate',
'entenv': 'False',
'salt_results_log': '/var/log/saltcall.results.log',
'salt_debug_log': '/var/log/saltcall.debug.log',
'sourceiss3bucket': 'True',
}, scriptparams)
},
)
elif 'Windows' in system:
scriptstoexecute = (
{
'ScriptSource': "https://systemprep.s3.amazonaws.com/SystemContent/Windows/Salt/SystemPrep-WindowsSaltInstall.ps1",
'Parameters': merge_dicts({
'saltworkingdir': '{0}\\SystemContent\\Windows\\Salt'.format(workingdir),
'saltcontentsource': "https://systemprep.s3.amazonaws.com/SystemContent/Windows/Salt/salt-content.zip",
'formulastoinclude': [
"https://salt-formulas.s3.amazonaws.com/systemprep-formula-master.zip",
"https://salt-formulas.s3.amazonaws.com/ash-windows-formula-master.zip",
],
'formulaterminationstrings': [
"-latest",
],
'ashrole': "MemberServer",
'entenv': 'False',
'saltstates': "Highstate",
}, scriptparams)
},
)
else:
#TODO: Update `except` logic
raise SystemError('System, {0}, is not recognized.'.format(system))
return scriptstoexecute
def create_working_dir(basedir, dirprefix):
"""
Creates a directory in `basedir` with a prefix of `dirprefix`.
The directory name will have a random string appended to `dirprefix`.
Returns the path to the working directory.
:rtype : str
:param basedir: str, the directory in which to create the working directory
:param dirprefix: str, prefix to prepend to the working directory
"""
workingdir = None
try:
workingdir = tempfile.mkdtemp(prefix=dirprefix, dir=basedir)
except Exception as exc:
#TODO: Update `except` logic
raise SystemError('Could not create workingdir in {0}.\n'
'Exception: {1}'.format(basedir, exc))
return workingdir
# MASKED: get_system_params function (lines 151-179)
def download_file(url, filename, sourceiss3bucket=None):
"""
Download the file from `url` and save it locally under `filename`.
:rtype : bool
:param url:
:param filename:
:param sourceiss3bucket:
"""
conn = None
if sourceiss3bucket:
bucket_name = url.split('/')[3]
key_name = '/'.join(url.split('/')[4:])
try:
conn = boto.connect_s3()
bucket = conn.get_bucket(bucket_name)
key = bucket.get_key(key_name)
key.get_contents_to_filename(filename=filename)
except (NameError, BotoClientError):
try:
bucket_name = url.split('/')[2].split('.')[0]
key_name = '/'.join(url.split('/')[3:])
bucket = conn.get_bucket(bucket_name)
key = bucket.get_key(key_name)
key.get_contents_to_filename(filename=filename)
except Exception as exc:
raise SystemError('Unable to download file from S3 bucket.\n'
'url = {0}\n'
'bucket = {1}\n'
'key = {2}\n'
'file = {3}\n'
'Exception: {4}'
.format(url, bucket_name, key_name,
filename, exc))
except Exception as exc:
raise SystemError('Unable to download file from S3 bucket.\n'
'url = {0}\n'
'bucket = {1}\n'
'key = {2}\n'
'file = {3}\n'
'Exception: {4}'
.format(url, bucket_name, key_name,
filename, exc))
print('Downloaded file from S3 bucket -- \n'
' url = {0}\n'
' filename = {1}'.format(url, filename))
else:
try:
response = urllib2.urlopen(url)
with open(filename, 'wb') as outfile:
shutil.copyfileobj(response, outfile)
except Exception as exc:
#TODO: Update `except` logic
raise SystemError('Unable to download file from web server.\n'
'url = {0}\n'
'filename = {1}\n'
'Exception: {2}'
.format(url, filename, exc))
print('Downloaded file from web server -- \n'
' url = {0}\n'
' filename = {1}'.format(url, filename))
return True
def cleanup(workingdir):
"""
Removes temporary files loaded to the system.
:param workingdir: str, Path to the working directory
:return: bool
"""
print('+-' * 40)
print('Cleanup Time...')
try:
shutil.rmtree(workingdir)
except Exception as exc:
#TODO: Update `except` logic
raise SystemError('Cleanup Failed!\n'
'Exception: {0}'.format(exc))
print('Removed temporary data in working directory -- ' + workingdir)
print('Exiting cleanup routine...')
print('-+' * 40)
return True
def main(noreboot='false', **kwargs):
"""
Master script that calls content scripts to be deployed when provisioning systems
"""
# NOTE: Using __file__ may freeze if trying to build an executable, e.g. via py2exe.
# NOTE: Using __file__ does not work if running from IDLE/interpreter.
# NOTE: __file__ may return relative path as opposed to an absolute path, so include os.path.abspath.
scriptname = ''
if '__file__' in dir():
scriptname = os.path.abspath(__file__)
else:
scriptname = os.path.abspath(sys.argv[0])
# Check special parameter types
noreboot = 'true' == noreboot.lower()
sourceiss3bucket = 'true' == kwargs.get('sourceiss3bucket', 'false').lower()
print('+' * 80)
print('Entering script -- {0}'.format(scriptname))
print('Printing parameters --')
print(' noreboot = {0}'.format(noreboot))
for key, value in kwargs.items():
print(' {0} = {1}'.format(key, value))
system = platform.system()
systemparams = get_system_params(system)
scriptstoexecute = get_scripts_to_execute(system, systemparams['workingdir'], **kwargs)
#Loop through each 'script' in scriptstoexecute
for script in scriptstoexecute:
url = script['ScriptSource']
filename = url.split('/')[-1]
fullfilepath = systemparams['workingdir'] + systemparams['pathseparator'] + filename
#Download each script, script['ScriptSource']
download_file(url, fullfilepath, sourceiss3bucket)
#Execute each script, passing it the parameters in script['Parameters']
#TODO: figure out if there's a better way to call and execute the script
print('Running script -- ' + script['ScriptSource'])
print('Sending parameters --')
for key, value in script['Parameters'].items():
print(' {0} = {1}'.format(key, value))
paramstring = ' '.join("%s='%s'" % (key, val) for (key, val) in script['Parameters'].items())
fullcommand = 'python {0} {1}'.format(fullfilepath, paramstring)
result = os.system(fullcommand)
if result != 0:
message = 'Encountered an unrecoverable error executing a ' \
'content script. Exiting with failure.\n' \
'Command executed: {0}' \
.format(fullcommand)
raise SystemError(message)
cleanup(systemparams['workingdir'])
if noreboot:
print('Detected `noreboot` switch. System will not be rebooted.')
else:
print('Reboot scheduled. System will reboot after the script exits.')
os.system(systemparams['restart'])
print('{0} complete!'.format(scriptname))
print('-' * 80)
if "__main__" == __name__:
# Convert command line parameters of the form `param=value` to a dictionary.
# NOTE: Keys are stored in lowercase format.
kwargs = {}
for x in sys.argv[1:]:
if '=' in x:
[key, value] = x.split('=', 1)
kwargs[key.lower()] = value
else:
message = 'Encountered a parameter that does not have = in it.'
raise SystemError(message)
# NOTE: We are unpacking kwargs to obtain the noreboot parameter for the main
# definition. The rest are packed back into kwargs.
# TODO: This is not necessary and consumes a minor overhead. I would just pass along the dictionary.
# However, since we will be moving to using argparse, this will become obsolete.
main(**kwargs)
|
def get_system_params(system):
"""
Returns a dictionary of OS platform-specific parameters.
:param system: str, the system type as returned by `platform.system`
:rtype : dict
"""
a = {}
workingdirprefix = 'systemprep-'
if 'Linux' in system:
tempdir = '/usr/tmp/'
a['pathseparator'] = '/'
a['readyfile'] = '/var/run/system-is-ready'
a['restart'] = 'shutdown -r +1 &'
elif 'Windows' in system:
#TODO: Add and test the Windows parameters/functionality
systemroot = os.environ['SYSTEMROOT']
systemdrive = os.environ['SYSTEMDRIVE']
tempdir = os.environ['TEMP']
a['pathseparator'] = '\\'
a['readyfile'] = '{0}\system-is-ready'.format(systemdrive)
a['restart'] = '{0}\system32\shutdown.exe/r /t 30 /d p:2:4 /c "SystemPrep complete. Rebooting computer."'.format(systemroot)
else:
#TODO: Update `except` logic
raise SystemError('System, {0}, is not recognized.'.format(system))
a['workingdir'] = create_working_dir(tempdir, workingdirprefix)
return a
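# Usage sketch: on a Linux host `platform.system()` returns 'Linux', so
#
#   params = get_system_params(platform.system())
#   # params['restart']    -> 'shutdown -r +1 &'
#   # params['workingdir'] -> e.g. '/usr/tmp/systemprep-Xu3f2a'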
| 151
| 179
|
#!/usr/bin/env python
import os
import sys
import platform
import tempfile
import urllib2
import shutil
import boto
from boto.exception import BotoClientError
def merge_dicts(a, b):
"""
Merge two dictionaries. If there is a key collision, `b` overrides `a`.
:param a: Dictionary of default settings
:param b: Dictionary of override settings
:rtype : dict
"""
try:
a.update(b)
except Exception as exc:
#TODO: Update `except` logic
raise SystemError('Failed to merge dictionaries. Dictionary A:\n\n'
'{0}\n\n'
'Dictionary B:\n\n'
'{1}\n\n'
'Exception: {2}'
.format(a, b, exc))
return a
def get_scripts_to_execute(system, workingdir, **scriptparams):
"""
Returns a tuple of dicts. Each dict has two keys: 'ScriptSource' and 'Parameters'.
'ScriptSource' is the URL of the script to be executed. Only http/s sources are currently supported.
'Parameters' is a dict of parameters to pass to the script.
Use `merge_dicts({yourdict}, scriptparams)` to merge command line parameters with a set of default parameters.
:param system: str, the system type as returned from `platform.system`
:param workingdir: str, the working directory where content should be saved
:param scriptparams: dict, parameters passed to the master script which should be relayed to the content scripts
:rtype : tuple
"""
if 'Linux' in system:
scriptstoexecute = (
{
'ScriptSource': "https://systemprep.s3.amazonaws.com/ContentScripts/systemprep-linuxyumrepoinstall.py",
'Parameters': merge_dicts({
'yumrepomap': [
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-amzn.repo',
'dist': 'amazon',
'epel_version': '6',
},
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el6.repo',
'dist': 'redhat',
'epel_version': '6',
},
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el6.repo',
'dist': 'centos',
'epel_version': '6',
},
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el7.repo',
'dist': 'redhat',
'epel_version': '7',
},
{
'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el7.repo',
'dist': 'centos',
'epel_version': '7',
},
],
}, scriptparams)
},
{
'ScriptSource': "https://systemprep.s3.amazonaws.com/ContentScripts/SystemPrep-LinuxSaltInstall.py",
'Parameters': merge_dicts({
'saltinstallmethod': 'yum',
'saltcontentsource': "https://systemprep-content.s3.amazonaws.com/linux/salt/salt-content.zip",
'formulastoinclude': [
"https://salt-formulas.s3.amazonaws.com/systemprep-formula-master.zip",
"https://salt-formulas.s3.amazonaws.com/ash-linux-formula-master.zip",
"https://salt-formulas.s3.amazonaws.com/join-domain-formula-master.zip",
"https://salt-formulas.s3.amazonaws.com/scc-formula-master.zip",
"https://s3.amazonaws.com/salt-formulas/name-computer-formula-master.zip",
],
'formulaterminationstrings': [
"-master",
"-latest",
],
'saltstates': 'Highstate',
'entenv': 'False',
'salt_results_log': '/var/log/saltcall.results.log',
'salt_debug_log': '/var/log/saltcall.debug.log',
'sourceiss3bucket': 'True',
}, scriptparams)
},
)
elif 'Windows' in system:
scriptstoexecute = (
{
'ScriptSource': "https://systemprep.s3.amazonaws.com/SystemContent/Windows/Salt/SystemPrep-WindowsSaltInstall.ps1",
'Parameters': merge_dicts({
'saltworkingdir': '{0}\\SystemContent\\Windows\\Salt'.format(workingdir),
'saltcontentsource': "https://systemprep.s3.amazonaws.com/SystemContent/Windows/Salt/salt-content.zip",
'formulastoinclude': [
"https://salt-formulas.s3.amazonaws.com/systemprep-formula-master.zip",
"https://salt-formulas.s3.amazonaws.com/ash-windows-formula-master.zip",
],
'formulaterminationstrings': [
"-latest",
],
'ashrole': "MemberServer",
'entenv': 'False',
'saltstates': "Highstate",
}, scriptparams)
},
)
else:
#TODO: Update `except` logic
raise SystemError('System, {0}, is not recognized.'.format(system))
return scriptstoexecute
def create_working_dir(basedir, dirprefix):
"""
Creates a directory in `basedir` with a prefix of `dirprefix`.
The directory name will have a random string appended to `dirprefix`.
Returns the path to the working directory.
:rtype : str
:param basedir: str, the directory in which to create the working directory
:param dirprefix: str, prefix to prepend to the working directory
"""
workingdir = None
try:
workingdir = tempfile.mkdtemp(prefix=dirprefix, dir=basedir)
except Exception as exc:
#TODO: Update `except` logic
raise SystemError('Could not create workingdir in {0}.\n'
'Exception: {1}'.format(basedir, exc))
return workingdir
def get_system_params(system):
"""
Returns a dictionary of OS platform-specific parameters.
:param system: str, the system type as returned by `platform.system`
:rtype : dict
"""
a = {}
workingdirprefix = 'systemprep-'
if 'Linux' in system:
tempdir = '/usr/tmp/'
a['pathseparator'] = '/'
a['readyfile'] = '/var/run/system-is-ready'
a['restart'] = 'shutdown -r +1 &'
elif 'Windows' in system:
#TODO: Add and test the Windows parameters/functionality
systemroot = os.environ['SYSTEMROOT']
systemdrive = os.environ['SYSTEMDRIVE']
tempdir = os.environ['TEMP']
a['pathseparator'] = '\\'
a['readyfile'] = '{0}\system-is-ready'.format(systemdrive)
a['restart'] = '{0}\system32\shutdown.exe/r /t 30 /d p:2:4 /c "SystemPrep complete. Rebooting computer."'.format(systemroot)
else:
#TODO: Update `except` logic
raise SystemError('System, {0}, is not recognized.'.format(system))
a['workingdir'] = create_working_dir(tempdir, workingdirprefix)
return a
def download_file(url, filename, sourceiss3bucket=None):
"""
Download the file from `url` and save it locally under `filename`.
:rtype : bool
:param url:
:param filename:
:param sourceiss3bucket:
"""
conn = None
if sourceiss3bucket:
bucket_name = url.split('/')[3]
key_name = '/'.join(url.split('/')[4:])
try:
conn = boto.connect_s3()
bucket = conn.get_bucket(bucket_name)
key = bucket.get_key(key_name)
key.get_contents_to_filename(filename=filename)
except (NameError, BotoClientError):
try:
bucket_name = url.split('/')[2].split('.')[0]
key_name = '/'.join(url.split('/')[3:])
bucket = conn.get_bucket(bucket_name)
key = bucket.get_key(key_name)
key.get_contents_to_filename(filename=filename)
except Exception as exc:
raise SystemError('Unable to download file from S3 bucket.\n'
'url = {0}\n'
'bucket = {1}\n'
'key = {2}\n'
'file = {3}\n'
'Exception: {4}'
.format(url, bucket_name, key_name,
filename, exc))
except Exception as exc:
raise SystemError('Unable to download file from S3 bucket.\n'
'url = {0}\n'
'bucket = {1}\n'
'key = {2}\n'
'file = {3}\n'
'Exception: {4}'
.format(url, bucket_name, key_name,
filename, exc))
print('Downloaded file from S3 bucket -- \n'
' url = {0}\n'
' filename = {1}'.format(url, filename))
else:
try:
response = urllib2.urlopen(url)
with open(filename, 'wb') as outfile:
shutil.copyfileobj(response, outfile)
except Exception as exc:
#TODO: Update `except` logic
raise SystemError('Unable to download file from web server.\n'
'url = {0}\n'
'filename = {1}\n'
'Exception: {2}'
.format(url, filename, exc))
print('Downloaded file from web server -- \n'
' url = {0}\n'
' filename = {1}'.format(url, filename))
return True
def cleanup(workingdir):
"""
Removes temporary files loaded to the system.
:param workingdir: str, Path to the working directory
:return: bool
"""
print('+-' * 40)
print('Cleanup Time...')
try:
shutil.rmtree(workingdir)
except Exception as exc:
#TODO: Update `except` logic
raise SystemError('Cleanup Failed!\n'
'Exception: {0}'.format(exc))
print('Removed temporary data in working directory -- ' + workingdir)
print('Exiting cleanup routine...')
print('-+' * 40)
return True
def main(noreboot='false', **kwargs):
"""
Master script that calls content scripts to be deployed when provisioning systems
"""
# NOTE: Using __file__ may freeze if trying to build an executable, e.g. via py2exe.
# NOTE: Using __file__ does not work if running from IDLE/interpreter.
# NOTE: __file__ may return relative path as opposed to an absolute path, so include os.path.abspath.
scriptname = ''
if '__file__' in dir():
scriptname = os.path.abspath(__file__)
else:
scriptname = os.path.abspath(sys.argv[0])
# Check special parameter types
noreboot = 'true' == noreboot.lower()
sourceiss3bucket = 'true' == kwargs.get('sourceiss3bucket', 'false').lower()
print('+' * 80)
print('Entering script -- {0}'.format(scriptname))
print('Printing parameters --')
print(' noreboot = {0}'.format(noreboot))
for key, value in kwargs.items():
print(' {0} = {1}'.format(key, value))
system = platform.system()
systemparams = get_system_params(system)
scriptstoexecute = get_scripts_to_execute(system, systemparams['workingdir'], **kwargs)
#Loop through each 'script' in scriptstoexecute
for script in scriptstoexecute:
url = script['ScriptSource']
filename = url.split('/')[-1]
fullfilepath = systemparams['workingdir'] + systemparams['pathseparator'] + filename
#Download each script, script['ScriptSource']
download_file(url, fullfilepath, sourceiss3bucket)
#Execute each script, passing it the parameters in script['Parameters']
#TODO: figure out if there's a better way to call and execute the script
print('Running script -- ' + script['ScriptSource'])
print('Sending parameters --')
for key, value in script['Parameters'].items():
print(' {0} = {1}'.format(key, value))
paramstring = ' '.join("%s='%s'" % (key, val) for (key, val) in script['Parameters'].items())
fullcommand = 'python {0} {1}'.format(fullfilepath, paramstring)
result = os.system(fullcommand)
if result != 0:
message = 'Encountered an unrecoverable error executing a ' \
'content script. Exiting with failure.\n' \
'Command executed: {0}' \
.format(fullcommand)
raise SystemError(message)
cleanup(systemparams['workingdir'])
if noreboot:
print('Detected `noreboot` switch. System will not be rebooted.')
else:
print('Reboot scheduled. System will reboot after the script exits.')
os.system(systemparams['restart'])
print('{0} complete!'.format(scriptname))
print('-' * 80)
if "__main__" == __name__:
# Convert command line parameters of the form `param=value` to a dictionary.
# NOTE: Keys are stored in lowercase format.
kwargs = {}
for x in sys.argv[1:]:
if '=' in x:
key, value = x.split('=', 1)
kwargs[key.lower()] = value
else:
message = 'Encountered a parameter without an "=" separator: {0}'.format(x)
raise SystemError(message)
# NOTE: We are unpacking kwargs to obtain the noreboot parameter for the main
# definition. The rest are packed back into kwargs.
# TODO: This is not necessary and consumes a minor overhead. I would just pass along the dictionary.
# However, since we will be moving to using argparse, this will become obsolete.
main(**kwargs)
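# Example invocation (hypothetical values; keys are lower-cased by the parser above):
#   python systemprep.py noreboot=true sourceiss3bucket=false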
|
score
|
Log probability of the given data `x`.
Attributes
----------
x : ndarray
Data to evaluate.
%(_doc_default_callparams)s
Returns
-------
log_prob : ndarray
The log probability of each sequence in the data.
|
from __future__ import division, print_function, absolute_import
# noinspection PyUnresolvedReferences
from six.moves import range
import numpy as np
from scipy.misc import doccer
from ...stats import nonuniform
from ...auxiliary.array import normalize, nunique, accum
__all__ = ['markov']
_doc_default_callparams = """\
startprob : array_like
Start probabilities.
transmat : array_like
Transition matrix.
"""
_doc_frozen_callparams = ""
_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
docdict_params = {
'_doc_default_callparams': _doc_default_callparams,
}
docdict_noparams = {
'_doc_default_callparams': _doc_frozen_callparams,
}
# noinspection PyPep8Naming
class markov_gen(object):
"""Markov model.
The `startprob` keyword specifies the start probabilities for the model.
The `transmat` keyword specifies the transition probabilities the model
follows.
Methods
-------
score(x, startprob, transmat)
Log probability of the given data `x`.
sample(x, startprob, transmat, size=1)
Draw random samples from a Markov model.
fit(x)
Fits a Markov model from data via MLE or MAP.
Parameters
----------
%(_doc_default_callparams)s
Alternatively, the object may be called (as a function) to fix the start
probabilities and transition matrix, returning a "frozen" Markov model:
rv = markov(startprob=None, transmat=None)
- Frozen object with the same methods but holding the given
start probabilities and transitions fixed.
Examples
--------
>>> from mlpy.stats.models import markov
>>> startprob = np.array([0.1, 0.4, 0.5])
>>> transmat = np.array([[0.3, 0.2, 0.5], [0.6, 0.3, 0.1], [0.1, 0.5, 0.4]])
>>> m = markov(startprob, transmat)
>>> m.sample(size=2)
[[2 2]]
.. note::
Adapted from Matlab:
| Project: `Probabilistic Modeling Toolkit for Matlab/Octave <https://github.com/probml/pmtk3>`_.
| Copyright (2010) Kevin Murphy and Matt Dunham
| License: `MIT <https://github.com/probml/pmtk3/blob/5fefd068a2e84ae508684d3e4750bd72a4164ba0/license.txt>`_
"""
def __init__(self):
super(markov_gen, self).__init__()
self.__doc__ = doccer.docformat(self.__doc__, docdict_params)
def __call__(self, startprob, transmat):
return markov_frozen(startprob, transmat)
# MASKED: score function (lines 91-117)
def sample(self, startprob, transmat, size=1):
"""Sample from a Markov model.
Attributes
----------
size : int or tuple(int)
Number of sampled variates; a scalar `size` is interpreted as (1, size). Defaults to `1`.
Returns
-------
vals: ndarray
The sampled sequences of size (nseq, seqlen).
"""
if np.isscalar(size):
size = (1, size)
vals = np.zeros(size, dtype=np.int32)
nseq, seqlen = size
for i in range(nseq):
vals[i][0] = nonuniform.rvs(startprob)
for t in range(1, seqlen):
vals[i][t] = nonuniform.rvs(transmat[vals[i][t - 1]])
return vals
def fit(self, x):
"""Fit a Markov model from data via MLE or MAP.
Attributes
----------
x : ndarray[int]
Observed data
Returns
-------
%(_doc_default_callparams)s
"""
# TODO: allow to pass pseudo_counts as parameter?
nstates = nunique(x.ravel())
pi_pseudo_counts = np.ones(nstates)
transmat_pseudo_counts = np.ones((nstates, nstates))
n = x.shape[0]
# Add the Dirichlet pseudo-counts before normalizing; minlength guards against
# states that never occur at the start of a sequence.
startprob = normalize(np.bincount(x[:, 0], minlength=nstates) + pi_pseudo_counts - 1)
counts = np.zeros((nstates, nstates))
for i in range(n):
counts += accum(np.vstack([x[i, 0:-1], x[i, 1::]]).T, 1, size=(nstates, nstates))
transmat = normalize(counts + transmat_pseudo_counts - 1, 1)
return startprob, transmat
markov = markov_gen()
# noinspection PyPep8Naming
class markov_frozen(object):
def __init__(self, startprob, transmat):
"""Create a "frozen" Markov model.
Parameters
----------
startprob : array_like
Start probabilities
transmat : array_like
Transition matrix
"""
self._model = markov_gen()
self.startprob = startprob
self.transmat = transmat
def score(self, x):
return self._model.score(x, self.startprob, self.transmat)
def sample(self, size=1):
return self._model.sample(self.startprob, self.transmat, size)
|
def score(self, x, startprob, transmat):
"""Log probability for a given data `x`.
Attributes
----------
x : ndarray
Data to evaluate.
%(_doc_default_callparams)s
Returns
-------
log_prob : ndarray
The log probability of each sequence in the data.
"""
log_transmat = np.log(transmat + np.finfo(float).eps)
log_startprob = np.log(startprob + np.finfo(float).eps)
log_prior = log_startprob[x[:, 0]]
n = x.shape[0]
nstates = log_startprob.shape[0]
logp = np.zeros(n)
for i in range(n):
njk = accum(np.vstack([x[i, 0:-1], x[i, 1::]]).T, 1, size=(nstates, nstates), dtype=np.int32)
logp[i] = np.sum(njk * log_transmat)
return logp + log_prior
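# In matrix form: log p(x_i) = log(pi[x_i[0]]) + sum_{j,k} N_jk * log(A[j,k]),
# where N_jk counts the j -> k transitions observed in sequence i.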
| 91
| 117
|
from __future__ import division, print_function, absolute_import
# noinspection PyUnresolvedReferences
from six.moves import range
import numpy as np
from scipy.misc import doccer
from ...stats import nonuniform
from ...auxiliary.array import normalize, nunique, accum
__all__ = ['markov']
_doc_default_callparams = """\
startprob : array_like
Start probabilities.
transmat : array_like
Transition matrix.
"""
_doc_frozen_callparams = ""
_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
docdict_params = {
'_doc_default_callparams': _doc_default_callparams,
}
docdict_noparams = {
'_doc_default_callparams': _doc_frozen_callparams,
}
# noinspection PyPep8Naming
class markov_gen(object):
"""Markov model.
The `startprob` keyword specifies the start probabilities for the model.
The `transmat` keyword specifies the transition probabilities the model
follows.
Methods
-------
score(x, startprob, transmat)
Log probability of the given data `x`.
sample(x, startprob, transmat, size=1)
Draw random samples from a Markov model.
fit(x)
Fits a Markov model from data via MLE or MAP.
Parameters
----------
%(_doc_default_callparams)s
Alternatively, the object may be called (as a function) to fix the start
probabilities and transition matrix, returning a "frozen" Markov model:
rv = markov(startprob=None, transmat=None)
- Frozen object with the same methods but holding the given
start probabilities and transitions fixed.
Examples
--------
>>> from mlpy.stats.models import markov
>>> startprob = np.array([0.1, 0.4, 0.5])
>>> transmat = np.array([[0.3, 0.2, 0.5], [0.6, 0.3, 0.1], [0.1, 0.5, 0.4]])
>>> m = markov(startprob, transmat)
>>> m.sample(size=2)
[[2 2]]
.. note::
Adapted from Matlab:
| Project: `Probabilistic Modeling Toolkit for Matlab/Octave <https://github.com/probml/pmtk3>`_.
| Copyright (2010) Kevin Murphy and Matt Dunham
| License: `MIT <https://github.com/probml/pmtk3/blob/5fefd068a2e84ae508684d3e4750bd72a4164ba0/license.txt>`_
"""
def __init__(self):
super(markov_gen, self).__init__()
self.__doc__ = doccer.docformat(self.__doc__, docdict_params)
def __call__(self, startprob, transmat):
return markov_frozen(startprob, transmat)
def score(self, x, startprob, transmat):
"""Log probability for a given data `x`.
Attributes
----------
x : ndarray
Data to evaluate.
%(_doc_default_callparams)s
Returns
-------
log_prob : ndarray
The log probability of each sequence in the data.
"""
log_transmat = np.log(transmat + np.finfo(float).eps)
log_startprob = np.log(startprob + np.finfo(float).eps)
log_prior = log_startprob[x[:, 0]]
n = x.shape[0]
nstates = log_startprob.shape[0]
logp = np.zeros(n)
for i in range(n):
njk = accum(np.vstack([x[i, 0:-1], x[i, 1::]]).T, 1, size=(nstates, nstates), dtype=np.int32)
logp[i] = np.sum(njk * log_transmat)
return logp + log_prior
def sample(self, startprob, transmat, size=1):
"""Sample from a Markov model.
Attributes
----------
size : int or tuple(int)
Number of sampled variates; a scalar `size` is interpreted as (1, size). Defaults to `1`.
Returns
-------
vals: ndarray
The sampled sequences of size (nseq, seqlen).
"""
if np.isscalar(size):
size = (1, size)
vals = np.zeros(size, dtype=np.int32)
nseq, seqlen = size
for i in range(nseq):
vals[i][0] = nonuniform.rvs(startprob)
for t in range(1, seqlen):
vals[i][t] = nonuniform.rvs(transmat[vals[i][t - 1]])
return vals
def fit(self, x):
"""Fit a Markov model from data via MLE or MAP.
Attributes
----------
x : ndarray[int]
Observed data
Returns
-------
%(_doc_default_callparams)s
"""
# TODO: allow to pass pseudo_counts as parameter?
nstates = nunique(x.ravel())
pi_pseudo_counts = np.ones(nstates)
transmat_pseudo_counts = np.ones((nstates, nstates))
n = x.shape[0]
# Add the Dirichlet pseudo-counts before normalizing; minlength guards against
# states that never occur at the start of a sequence.
startprob = normalize(np.bincount(x[:, 0], minlength=nstates) + pi_pseudo_counts - 1)
counts = np.zeros((nstates, nstates))
for i in range(n):
counts += accum(np.vstack([x[i, 0:-1], x[i, 1::]]).T, 1, size=(nstates, nstates))
transmat = normalize(counts + transmat_pseudo_counts - 1, 1)
return startprob, transmat
markov = markov_gen()
# noinspection PyPep8Naming
class markov_frozen(object):
def __init__(self, startprob, transmat):
"""Create a "frozen" Markov model.
Parameters
----------
startprob : array_like
Start probabilities
transmat : array_like
Transition matrix
"""
self._model = markov_gen()
self.startprob = startprob
self.transmat = transmat
def score(self, x):
return self._model.score(x, self.startprob, self.transmat)
def sample(self, size=1):
return self._model.sample(self.startprob, self.transmat, size)
|
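A minimal usage sketch for the model above (assuming the `mlpy` package layout shown in the imports, and the corrected `__call__` so that `markov(...)` returns the frozen model; values are illustrative):
import numpy as np
from mlpy.stats.models import markov
startprob = np.array([0.1, 0.4, 0.5])
transmat = np.array([[0.3, 0.2, 0.5],
                     [0.6, 0.3, 0.1],
                     [0.1, 0.5, 0.4]])
rv = markov(startprob, transmat)   # frozen model
seqs = rv.sample(size=(3, 10))     # 3 sequences of length 10, dtype int32
logp = rv.score(seqs)              # one log-probability per sequence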
getInputSpecification
|
Method to get a reference to a class that specifies the input data for
class cls.
@ Out, inputSpecification, InputData.ParameterInput, class to use for
specifying input of cls.
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Polynomial Regression
"""
import numpy as np
import utils.importerUtils
statsmodels = utils.importerUtils.importModuleLazy("statsmodels", globals())
from utils import InputData, InputTypes, randomUtils, xmlUtils, mathUtils, utils
from .TimeSeriesAnalyzer import TimeSeriesCharacterizer, TimeSeriesGenerator
class PolynomialRegression(TimeSeriesGenerator, TimeSeriesCharacterizer):
"""
"""
# MASKED: getInputSpecification function (lines 30-43)
#
# API Methods
#
def __init__(self, *args, **kwargs):
"""
A constructor that will appropriately initialize a supervised learning object
@ In, args, list, an arbitrary list of positional values
@ In, kwargs, dict, an arbitrary dictionary of keywords and values
@ Out, None
"""
# general infrastructure
super().__init__(*args, **kwargs)
def handleInput(self, spec):
"""
Reads user inputs into this object.
@ In, spec, InputData.ParameterInput, input specifications
@ Out, settings, dict, initialization settings for this algorithm
"""
settings = super().handleInput(spec)
settings['degree'] = spec.findFirst('degree').value
return settings
def characterize(self, signal, pivot, targets, settings):
"""
Determines the characteristics of the signal based on this algorithm.
@ In, signal, np.ndarray, time series with dims [time, target]
@ In, pivot, np.1darray, time-like parameter values
@ In, targets, list(str), names of targets in same order as signal
@ In, settings, dict, additional settings specific to this algorithm
@ Out, params, dict, characteristic parameters
"""
from sklearn.preprocessing import PolynomialFeatures
import statsmodels.api as sm
params = {target: {'model': {}} for target in targets}
degree = settings['degree']
features = PolynomialFeatures(degree=degree)
xp = features.fit_transform(pivot.reshape(-1, 1))
# Fit each target's column of the signal separately; OLS expects a 1-D endog.
for t, target in enumerate(targets):
results = sm.OLS(signal[:, t], xp).fit()
params[target]['model']['intercept'] = results.params[0]
for i, value in enumerate(results.params[1:]):
params[target]['model'][f'coef{i+1}'] = value
params[target]['model']['object'] = results
return params
def getParamNames(self, settings):
"""
Return list of expected variable names based on the parameters
@ In, settings, dict, training parameters for this algorithm
@ Out, names, list, string list of names
"""
names = []
for target in settings['target']:
base = f'{self.name}__{target}'
names.append(f'{base}__intercept')
for i in range(1, settings['degree'] + 1):  # one name per polynomial coefficient
names.append(f'{base}__coef{i}')
return names
def getParamsAsVars(self, params):
"""
Map characterization parameters into flattened variable format
@ In, params, dict, trained parameters (as from characterize)
@ Out, rlz, dict, realization-style response
"""
rlz = {}
for target, info in params.items():
base = f'{self.name}__{target}'
for name, value in info['model'].items():
if name == 'object':
continue
rlz[f'{base}__{name}'] = value
return rlz
def generate(self, params, pivot, settings):
"""
Generates a synthetic history from fitted parameters.
@ In, params, dict, characterization such as obtained from self.characterize()
@ In, pivot, np.array(float), pivot parameter values
@ In, settings, dict, additional settings specific to algorithm
@ Out, synthetic, np.array(float), synthetic estimated model signal
"""
from sklearn.preprocessing import PolynomialFeatures
synthetic = np.zeros((len(pivot), len(params)))
degree = settings['degree']
features = PolynomialFeatures(degree=degree)
xp = features.fit_transform(pivot.reshape(-1, 1))
for t, (target, _) in enumerate(params.items()):
model = params[target]['model']['object']
synthetic[:, t] = model.predict(xp)
return synthetic
def writeXML(self, writeTo, params):
"""
Allows the engine to put whatever it wants into an XML to print to file.
@ In, writeTo, xmlUtils.StaticXmlElement, entity to write to
@ In, params, dict, trained parameters as from self.characterize
@ Out, None
"""
for target, info in params.items():
base = xmlUtils.newNode(target)
writeTo.append(base)
for name, value in info['model'].items():
if name == 'object':
continue
base.append(xmlUtils.newNode(name, text=f'{float(value):1.9e}'))
|
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for
class cls.
@ Out, inputSpecification, InputData.ParameterInput, class to use for
specifying input of cls.
"""
specs = super(PolynomialRegression, cls).getInputSpecification()
specs.name = 'PolynomialRegression'
specs.description = """TimeSeriesAnalysis algorithm for fitting data with a polynomial of degree one or greater."""
specs.addSub(InputData.parameterInputFactory('degree', contentType=InputTypes.IntegerType,
descr="Specifies the degree of the polynomial used to fit the data."))
return specs
| 30
| 43
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Polynomial Regression
"""
import numpy as np
import utils.importerUtils
statsmodels = utils.importerUtils.importModuleLazy("statsmodels", globals())
from utils import InputData, InputTypes, randomUtils, xmlUtils, mathUtils, utils
from .TimeSeriesAnalyzer import TimeSeriesCharacterizer, TimeSeriesGenerator
class PolynomialRegression(TimeSeriesGenerator, TimeSeriesCharacterizer):
"""
"""
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for
class cls.
@ Out, inputSpecification, InputData.ParameterInput, class to use for
specifying input of cls.
"""
specs = super(PolynomialRegression, cls).getInputSpecification()
specs.name = 'PolynomialRegression'
specs.description = """TimeSeriesAnalysis algorithm for fitting data with a polynomial of degree one or greater."""
specs.addSub(InputData.parameterInputFactory('degree', contentType=InputTypes.IntegerType,
descr="Specifies the degree of the polynomial used to fit the data."))
return specs
#
# API Methods
#
def __init__(self, *args, **kwargs):
"""
A constructor that will appropriately initialize a supervised learning object
@ In, args, list, an arbitrary list of positional values
@ In, kwargs, dict, an arbitrary dictionary of keywords and values
@ Out, None
"""
# general infrastructure
super().__init__(*args, **kwargs)
def handleInput(self, spec):
"""
Reads user inputs into this object.
@ In, spec, InputData.ParameterInput, input specifications
@ Out, settings, dict, initialization settings for this algorithm
"""
settings = super().handleInput(spec)
settings['degree'] = spec.findFirst('degree').value
return settings
def characterize(self, signal, pivot, targets, settings):
"""
Determines the characteristics of the signal based on this algorithm.
@ In, signal, np.ndarray, time series with dims [time, target]
@ In, pivot, np.1darray, time-like parameter values
@ In, targets, list(str), names of targets in same order as signal
@ In, settings, dict, additional settings specific to this algorithm
@ Out, params, dict, characteristic parameters
"""
from sklearn.preprocessing import PolynomialFeatures
import statsmodels.api as sm
params = {target: {'model': {}} for target in targets}
degree = settings['degree']
features = PolynomialFeatures(degree=degree)
xp = features.fit_transform(pivot.reshape(-1, 1))
# Fit each target's column of the signal separately; OLS expects a 1-D endog.
for t, target in enumerate(targets):
results = sm.OLS(signal[:, t], xp).fit()
params[target]['model']['intercept'] = results.params[0]
for i, value in enumerate(results.params[1:]):
params[target]['model'][f'coef{i+1}'] = value
params[target]['model']['object'] = results
return params
def getParamNames(self, settings):
"""
Return list of expected variable names based on the parameters
@ In, settings, dict, training parameters for this algorithm
@ Out, names, list, string list of names
"""
names = []
for target in settings['target']:
base = f'{self.name}__{target}'
names.append(f'{base}__intercept')
for i in range(1, settings['degree'] + 1):  # one name per polynomial coefficient
names.append(f'{base}__coef{i}')
return names
def getParamsAsVars(self, params):
"""
Map characterization parameters into flattened variable format
@ In, params, dict, trained parameters (as from characterize)
@ Out, rlz, dict, realization-style response
"""
rlz = {}
for target, info in params.items():
base = f'{self.name}__{target}'
for name, value in info['model'].items():
if name == 'object':
continue
rlz[f'{base}__{name}'] = value
return rlz
def generate(self, params, pivot, settings):
"""
Generates a synthetic history from fitted parameters.
@ In, params, dict, characterization such as obtained from self.characterize()
@ In, pivot, np.array(float), pivot parameter values
@ In, settings, dict, additional settings specific to algorithm
@ Out, synthetic, np.array(float), synthetic estimated model signal
"""
from sklearn.preprocessing import PolynomialFeatures
synthetic = np.zeros((len(pivot), len(params)))
degree = settings['degree']
features = PolynomialFeatures(degree=degree)
xp = features.fit_transform(pivot.reshape(-1, 1))
for t, (target, _) in enumerate(params.items()):
model = params[target]['model']['object']
synthetic[:, t] = model.predict(xp)
return synthetic
def writeXML(self, writeTo, params):
"""
Allows the engine to put whatever it wants into an XML to print to file.
@ In, writeTo, xmlUtils.StaticXmlElement, entity to write to
@ In, params, dict, trained parameters as from self.characterize
@ Out, None
"""
for target, info in params.items():
base = xmlUtils.newNode(target)
writeTo.append(base)
for name, value in info['model'].items():
if name == 'object':
continue
base.append(xmlUtils.newNode(name, text=f'{float(value):1.9e}'))
|
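A standalone sketch of the per-target fit that `characterize` performs, on synthetic data (names and values here are illustrative, not part of the record):
import numpy as np
from sklearn.preprocessing import PolynomialFeatures
import statsmodels.api as sm
pivot = np.linspace(0.0, 1.0, 50)            # time-like parameter
signal = 1.0 + 2.0 * pivot - 3.0 * pivot**2  # synthetic degree-2 target
xp = PolynomialFeatures(degree=2).fit_transform(pivot.reshape(-1, 1))
results = sm.OLS(signal, xp).fit()
print(results.params)  # ~[1.0, 2.0, -3.0]: intercept, coef1, coef2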
handleInput
|
Reads user inputs into this object.
@ In, spec, InputData.ParameterInput, input specifications
@ Out, settings, dict, initialization settings for this algorithm
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Polynomial Regression
"""
import numpy as np
import utils.importerUtils
statsmodels = utils.importerUtils.importModuleLazy("statsmodels", globals())
from utils import InputData, InputTypes, randomUtils, xmlUtils, mathUtils, utils
from .TimeSeriesAnalyzer import TimeSeriesCharacterizer, TimeSeriesGenerator
class PolynomialRegression(TimeSeriesGenerator, TimeSeriesCharacterizer):
"""
"""
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for
class cls.
@ Out, inputSpecification, InputData.ParameterInput, class to use for
specifying input of cls.
"""
specs = super(PolynomialRegression, cls).getInputSpecification()
specs.name = 'PolynomialRegression'
specs.description = """TimeSeriesAnalysis algorithm for fitting data with a polynomial of degree one or greater."""
specs.addSub(InputData.parameterInputFactory('degree', contentType=InputTypes.IntegerType,
descr="Specifies the degree of the polynomial used to fit the data."))
return specs
#
# API Methods
#
def __init__(self, *args, **kwargs):
"""
A constructor that will appropriately initialize a supervised learning object
@ In, args, list, an arbitrary list of positional values
@ In, kwargs, dict, an arbitrary dictionary of keywords and values
@ Out, None
"""
# general infrastructure
super().__init__(*args, **kwargs)
# MASKED: handleInput function (lines 58-66)
def characterize(self, signal, pivot, targets, settings):
"""
Determines the characteristics of the signal based on this algorithm.
@ In, signal, np.ndarray, time series with dims [time, target]
@ In, pivot, np.1darray, time-like parameter values
@ In, targets, list(str), names of targets in same order as signal
@ In, settings, dict, additional settings specific to this algorithm
@ Out, params, dict, characteristic parameters
"""
from sklearn.preprocessing import PolynomialFeatures
import statsmodels.api as sm
params = {target: {'model': {}} for target in targets}
degree = settings['degree']
features = PolynomialFeatures(degree=degree)
xp = features.fit_transform(pivot.reshape(-1, 1))
# Fit each target's column of the signal separately; OLS expects a 1-D endog.
for t, target in enumerate(targets):
results = sm.OLS(signal[:, t], xp).fit()
params[target]['model']['intercept'] = results.params[0]
for i, value in enumerate(results.params[1:]):
params[target]['model'][f'coef{i+1}'] = value
params[target]['model']['object'] = results
return params
def getParamNames(self, settings):
"""
Return list of expected variable names based on the parameters
@ In, settings, dict, training parameters for this algorithm
@ Out, names, list, string list of names
"""
names = []
for target in settings['target']:
base = f'{self.name}__{target}'
names.append(f'{base}__intercept')
for i in range(1, settings['degree'] + 1):  # one name per polynomial coefficient
names.append(f'{base}__coef{i}')
return names
def getParamsAsVars(self, params):
"""
Map characterization parameters into flattened variable format
@ In, params, dict, trained parameters (as from characterize)
@ Out, rlz, dict, realization-style response
"""
rlz = {}
for target, info in params.items():
base = f'{self.name}__{target}'
for name, value in info['model'].items():
if name == 'object':
continue
rlz[f'{base}__{name}'] = value
return rlz
def generate(self, params, pivot, settings):
"""
Generates a synthetic history from fitted parameters.
@ In, params, dict, characterization such as obtained from self.characterize()
@ In, pivot, np.array(float), pivot parameter values
@ In, settings, dict, additional settings specific to algorithm
@ Out, synthetic, np.array(float), synthetic estimated model signal
"""
from sklearn.preprocessing import PolynomialFeatures
synthetic = np.zeros((len(pivot), len(params)))
degree = settings['degree']
features = PolynomialFeatures(degree=degree)
xp = features.fit_transform(pivot.reshape(-1, 1))
for t, (target, _) in enumerate(params.items()):
model = params[target]['model']['object']
synthetic[:, t] = model.predict(xp)
return synthetic
def writeXML(self, writeTo, params):
"""
Allows the engine to put whatever it wants into an XML to print to file.
@ In, writeTo, xmlUtils.StaticXmlElement, entity to write to
@ In, params, dict, trained parameters as from self.characterize
@ Out, None
"""
for target, info in params.items():
base = xmlUtils.newNode(target)
writeTo.append(base)
for name, value in info['model'].items():
if name == 'object':
continue
base.append(xmlUtils.newNode(name, text=f'{float(value):1.9e}'))
|
def handleInput(self, spec):
"""
Reads user inputs into this object.
@ In, spec, InputData.ParameterInput, input specifications
@ Out, settings, dict, initialization settings for this algorithm
"""
settings = super().handleInput(spec)
settings['degree'] = spec.findFirst('degree').value
return settings
| 58
| 66
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Polynomial Regression
"""
import numpy as np
import utils.importerUtils
statsmodels = utils.importerUtils.importModuleLazy("statsmodels", globals())
from utils import InputData, InputTypes, randomUtils, xmlUtils, mathUtils, utils
from .TimeSeriesAnalyzer import TimeSeriesCharacterizer, TimeSeriesGenerator
class PolynomialRegression(TimeSeriesGenerator, TimeSeriesCharacterizer):
"""
"""
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for
class cls.
@ Out, inputSpecification, InputData.ParameterInput, class to use for
specifying input of cls.
"""
specs = super(PolynomialRegression, cls).getInputSpecification()
specs.name = 'PolynomialRegression'
specs.description = """TimeSeriesAnalysis algorithm for fitting data with a polynomial of degree one or greater."""
specs.addSub(InputData.parameterInputFactory('degree', contentType=InputTypes.IntegerType,
descr="Specifies the degree of the polynomial used to fit the data."))
return specs
#
# API Methods
#
def __init__(self, *args, **kwargs):
"""
A constructor that will appropriately initialize a supervised learning object
@ In, args, list, an arbitrary list of positional values
@ In, kwargs, dict, an arbitrary dictionary of keywords and values
@ Out, None
"""
# general infrastructure
super().__init__(*args, **kwargs)
def handleInput(self, spec):
"""
Reads user inputs into this object.
@ In, spec, InputData.ParameterInput, input specifications
@ Out, settings, dict, initialization settings for this algorithm
"""
settings = super().handleInput(spec)
settings['degree'] = spec.findFirst('degree').value
return settings
def characterize(self, signal, pivot, targets, settings):
"""
Determines the characteristics of the signal based on this algorithm.
@ In, signal, np.ndarray, time series with dims [time, target]
@ In, pivot, np.1darray, time-like parameter values
@ In, targets, list(str), names of targets in same order as signal
@ In, settings, dict, additional settings specific to this algorithm
@ Out, params, dict, characteristic parameters
"""
from sklearn.preprocessing import PolynomialFeatures
import statsmodels.api as sm
params = {target: {'model': {}} for target in targets}
degree = settings['degree']
features = PolynomialFeatures(degree=degree)
xp = features.fit_transform(pivot.reshape(-1, 1))
# Fit each target's column of the signal separately; OLS expects a 1-D endog.
for t, target in enumerate(targets):
results = sm.OLS(signal[:, t], xp).fit()
params[target]['model']['intercept'] = results.params[0]
for i, value in enumerate(results.params[1:]):
params[target]['model'][f'coef{i+1}'] = value
params[target]['model']['object'] = results
return params
def getParamNames(self, settings):
"""
Return list of expected variable names based on the parameters
@ In, settings, dict, training parameters for this algorithm
@ Out, names, list, string list of names
"""
names = []
for target in settings['target']:
base = f'{self.name}__{target}'
names.append(f'{base}__intercept')
for i in range(1, settings['degree'] + 1):  # one name per polynomial coefficient
names.append(f'{base}__coef{i}')
return names
def getParamsAsVars(self, params):
"""
Map characterization parameters into flattened variable format
@ In, params, dict, trained parameters (as from characterize)
@ Out, rlz, dict, realization-style response
"""
rlz = {}
for target, info in params.items():
base = f'{self.name}__{target}'
for name, value in info['model'].items():
if name == 'object':
continue
rlz[f'{base}__{name}'] = value
return rlz
def generate(self, params, pivot, settings):
"""
Generates a synthetic history from fitted parameters.
@ In, params, dict, characterization such as obtained from self.characterize()
@ In, pivot, np.array(float), pivot parameter values
@ In, settings, dict, additional settings specific to algorithm
@ Out, synthetic, np.array(float), synthetic estimated model signal
"""
from sklearn.preprocessing import PolynomialFeatures
synthetic = np.zeros((len(pivot), len(params)))
degree = settings['degree']
features = PolynomialFeatures(degree=degree)
xp = features.fit_transform(pivot.reshape(-1, 1))
for t, (target, _) in enumerate(params.items()):
model = params[target]['model']['object']
synthetic[:, t] = model.predict(xp)
return synthetic
def writeXML(self, writeTo, params):
"""
Allows the engine to put whatever it wants into an XML to print to file.
@ In, writeTo, xmlUtils.StaticXmlElement, entity to write to
@ In, params, dict, trained parameters as from self.characterize
@ Out, None
"""
for target, info in params.items():
base = xmlUtils.newNode(target)
writeTo.append(base)
for name, value in info['model'].items():
if name == 'object':
continue
base.append(xmlUtils.newNode(name, text=f'{float(value):1.9e}'))
|
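A small sketch of the flattened naming convention shared by `getParamNames` and `getParamsAsVars` (hypothetical target name and coefficient values):
name = 'PolynomialRegression'
params = {'signal0': {'model': {'intercept': 1.0, 'coef1': 2.0, 'object': None}}}
rlz = {}
for target, info in params.items():
    base = f'{name}__{target}'
    for key, value in info['model'].items():
        if key == 'object':
            continue  # the fitted results object is not exported
        rlz[f'{base}__{key}'] = value
print(rlz)  # {'PolynomialRegression__signal0__intercept': 1.0,
            #  'PolynomialRegression__signal0__coef1': 2.0}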
characterize
|
Determines the characteristics of the signal based on this algorithm.
@ In, signal, np.ndarray, time series with dims [time, target]
@ In, pivot, np.1darray, time-like parameter values
@ In, targets, list(str), names of targets in same order as signal
@ In, settings, dict, additional settings specific to this algorithm
@ Out, params, dict, characteristic parameters
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Polynomial Regression
"""
import numpy as np
import utils.importerUtils
statsmodels = utils.importerUtils.importModuleLazy("statsmodels", globals())
from utils import InputData, InputTypes, randomUtils, xmlUtils, mathUtils, utils
from .TimeSeriesAnalyzer import TimeSeriesCharacterizer, TimeSeriesGenerator
class PolynomialRegression(TimeSeriesGenerator, TimeSeriesCharacterizer):
"""
"""
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for
class cls.
@ Out, inputSpecification, InputData.ParameterInput, class to use for
specifying input of cls.
"""
specs = super(PolynomialRegression, cls).getInputSpecification()
specs.name = 'PolynomialRegression'
specs.description = """TimeSeriesAnalysis algorithm for fitting data with a polynomial of degree one or greater."""
specs.addSub(InputData.parameterInputFactory('degree', contentType=InputTypes.IntegerType,
descr="Specifies the degree of the polynomial used to fit the data."))
return specs
#
# API Methods
#
def __init__(self, *args, **kwargs):
"""
A constructor that will appropriately initialize a supervised learning object
@ In, args, list, an arbitrary list of positional values
@ In, kwargs, dict, an arbitrary dictionary of keywords and values
@ Out, None
"""
# general infrastructure
super().__init__(*args, **kwargs)
def handleInput(self, spec):
"""
Reads user inputs into this object.
@ In, spec, InputData.ParameterInput, input specifications
@ Out, settings, dict, initialization settings for this algorithm
"""
settings = super().handleInput(spec)
settings['degree'] = spec.findFirst('degree').value
return settings
# MASKED: characterize function (lines 68-92)
def getParamNames(self, settings):
"""
Return list of expected variable names based on the parameters
@ In, settings, dict, training parameters for this algorithm
@ Out, names, list, string list of names
"""
names = []
for target in settings['target']:
base = f'{self.name}__{target}'
names.append(f'{base}__intercept')
for i in range(1, settings['degree'] + 1):  # one name per polynomial coefficient
names.append(f'{base}__coef{i}')
return names
def getParamsAsVars(self, params):
"""
Map characterization parameters into flattened variable format
@ In, params, dict, trained parameters (as from characterize)
@ Out, rlz, dict, realization-style response
"""
rlz = {}
for target, info in params.items():
base = f'{self.name}__{target}'
for name, value in info['model'].items():
if name == 'object':
continue
rlz[f'{base}__{name}'] = value
return rlz
def generate(self, params, pivot, settings):
"""
Generates a synthetic history from fitted parameters.
@ In, params, dict, characterization such as obtained from self.characterize()
@ In, pivot, np.array(float), pivot parameter values
@ In, settings, dict, additional settings specific to algorithm
@ Out, synthetic, np.array(float), synthetic estimated model signal
"""
from sklearn.preprocessing import PolynomialFeatures
synthetic = np.zeros((len(pivot), len(params)))
degree = settings['degree']
features = PolynomialFeatures(degree=degree)
xp = features.fit_transform(pivot.reshape(-1, 1))
for t, (target, _) in enumerate(params.items()):
model = params[target]['model']['object']
synthetic[:, t] = model.predict(xp)
return synthetic
def writeXML(self, writeTo, params):
"""
Allows the engine to put whatever it wants into an XML to print to file.
@ In, writeTo, xmlUtils.StaticXmlElement, entity to write to
@ In, params, dict, trained parameters as from self.characterize
@ Out, None
"""
for target, info in params.items():
base = xmlUtils.newNode(target)
writeTo.append(base)
for name, value in info['model'].items():
if name == 'object':
continue
base.append(xmlUtils.newNode(name, text=f'{float(value):1.9e}'))
|
def characterize(self, signal, pivot, targets, settings):
"""
Determines the characteristics of the signal based on this algorithm.
@ In, signal, np.ndarray, time series with dims [time, target]
@ In, pivot, np.1darray, time-like parameter values
@ In, targets, list(str), names of targets in same order as signal
@ In, settings, dict, additional settings specific to this algorithm
@ Out, params, dict, characteristic parameters
"""
from sklearn.preprocessing import PolynomialFeatures
import statsmodels.api as sm
params = {target: {'model': {}} for target in targets}
degree = settings['degree']
features = PolynomialFeatures(degree=degree)
xp = features.fit_transform(pivot.reshape(-1, 1))
# Fit each target's column of the signal separately; OLS expects a 1-D endog.
for t, target in enumerate(targets):
results = sm.OLS(signal[:, t], xp).fit()
params[target]['model']['intercept'] = results.params[0]
for i, value in enumerate(results.params[1:]):
params[target]['model'][f'coef{i+1}'] = value
params[target]['model']['object'] = results
return params
| 68
| 92
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Polynomial Regression
"""
import numpy as np
import utils.importerUtils
statsmodels = utils.importerUtils.importModuleLazy("statsmodels", globals())
from utils import InputData, InputTypes, randomUtils, xmlUtils, mathUtils, utils
from .TimeSeriesAnalyzer import TimeSeriesCharacterizer, TimeSeriesGenerator
class PolynomialRegression(TimeSeriesGenerator, TimeSeriesCharacterizer):
"""
"""
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for
class cls.
@ Out, inputSpecification, InputData.ParameterInput, class to use for
specifying input of cls.
"""
specs = super(PolynomialRegression, cls).getInputSpecification()
specs.name = 'PolynomialRegression'
specs.description = """TimeSeriesAnalysis algorithm for fitting data with a polynomial of degree one or greater."""
specs.addSub(InputData.parameterInputFactory('degree', contentType=InputTypes.IntegerType,
descr="Specifies the degree of the polynomial used to fit the data."))
return specs
#
# API Methods
#
def __init__(self, *args, **kwargs):
"""
A constructor that will appropriately initialize a supervised learning object
@ In, args, list, an arbitrary list of positional values
@ In, kwargs, dict, an arbitrary dictionary of keywords and values
@ Out, None
"""
# general infrastructure
super().__init__(*args, **kwargs)
def handleInput(self, spec):
"""
Reads user inputs into this object.
@ In, spec, InputData.ParameterInput, input specifications
@ Out, settings, dict, initialization settings for this algorithm
"""
settings = super().handleInput(spec)
settings['degree'] = spec.findFirst('degree').value
return settings
def characterize(self, signal, pivot, targets, settings):
"""
Determines the characteristics of the signal based on this algorithm.
@ In, signal, np.ndarray, time series with dims [time, target]
@ In, pivot, np.1darray, time-like parameter values
@ In, targets, list(str), names of targets in same order as signal
@ In, settings, dict, additional settings specific to this algorithm
@ Out, params, dict, characteristic parameters
"""
from sklearn.preprocessing import PolynomialFeatures
import statsmodels.api as sm
params = {target: {'model': {}} for target in targets}
degree = settings['degree']
features = PolynomialFeatures(degree=degree)
xp = features.fit_transform(pivot.reshape(-1, 1))
# Fit each target's column of the signal separately; OLS expects a 1-D endog.
for t, target in enumerate(targets):
results = sm.OLS(signal[:, t], xp).fit()
params[target]['model']['intercept'] = results.params[0]
for i, value in enumerate(results.params[1:]):
params[target]['model'][f'coef{i+1}'] = value
params[target]['model']['object'] = results
return params
def getParamNames(self, settings):
"""
Return list of expected variable names based on the parameters
@ In, settings, dict, training parameters for this algorithm
@ Out, names, list, string list of names
"""
names = []
for target in settings['target']:
base = f'{self.name}__{target}'
names.append(f'{base}__intercept')
for i in range(1, settings['degree'] + 1):  # one name per polynomial coefficient
names.append(f'{base}__coef{i}')
return names
def getParamsAsVars(self, params):
"""
Map characterization parameters into flattened variable format
@ In, params, dict, trained parameters (as from characterize)
@ Out, rlz, dict, realization-style response
"""
rlz = {}
for target, info in params.items():
base = f'{self.name}__{target}'
for name, value in info['model'].items():
if name == 'object':
continue
rlz[f'{base}__{name}'] = value
return rlz
def generate(self, params, pivot, settings):
"""
Generates a synthetic history from fitted parameters.
@ In, params, dict, characterization such as obtained from self.characterize()
@ In, pivot, np.array(float), pivot parameter values
@ In, settings, dict, additional settings specific to algorithm
@ Out, synthetic, np.array(float), synthetic estimated model signal
"""
from sklearn.preprocessing import PolynomialFeatures
synthetic = np.zeros((len(pivot), len(params)))
degree = settings['degree']
features = PolynomialFeatures(degree=degree)
xp = features.fit_transform(pivot.reshape(-1, 1))
for t, (target, _) in enumerate(params.items()):
model = params[target]['model']['object']
synthetic[:, t] = model.predict(xp)
return synthetic
def writeXML(self, writeTo, params):
"""
Allows the engine to put whatever it wants into an XML to print to file.
@ In, writeTo, xmlUtils.StaticXmlElement, entity to write to
@ In, params, dict, trained parameters as from self.characterize
@ Out, None
"""
for target, info in params.items():
base = xmlUtils.newNode(target)
writeTo.append(base)
for name, value in info['model'].items():
if name == 'object':
continue
base.append(xmlUtils.newNode(name, text=f'{float(value):1.9e}'))
|
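A standalone sketch of the round trip behind `generate`: fit a synthetic signal, then evaluate the fitted model on the pivot grid, which is the value placed in each output column (names and data are illustrative):
import numpy as np
from sklearn.preprocessing import PolynomialFeatures
import statsmodels.api as sm
pivot = np.linspace(0.0, 1.0, 50)
signal = 0.5 + 1.5 * pivot        # synthetic degree-1 target
xp = PolynomialFeatures(degree=1).fit_transform(pivot.reshape(-1, 1))
model = sm.OLS(signal, xp).fit()
synthetic = model.predict(xp)     # what generate() writes per column
assert np.allclose(synthetic, signal)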
__init__
|
The set of arguments for constructing a PrivateEndpointConnection resource.
:param pulumi.Input[str] account_name: Cosmos DB database account name.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] group_id: Group id of the private endpoint.
:param pulumi.Input['PrivateEndpointPropertyArgs'] private_endpoint: Private endpoint which the connection belongs to.
:param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection.
:param pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs'] private_link_service_connection_state: Connection State of the Private Endpoint Connection.
:param pulumi.Input[str] provisioning_state: Provisioning state of the private endpoint.
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['PrivateEndpointConnectionArgs', 'PrivateEndpointConnection']
@pulumi.input_type
class PrivateEndpointConnectionArgs:
# MASKED: __init__ function (lines 17-46)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> pulumi.Input[str]:
"""
Cosmos DB database account name.
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> Optional[pulumi.Input[str]]:
"""
Group id of the private endpoint.
"""
return pulumi.get(self, "group_id")
@group_id.setter
def group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_id", value)
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> Optional[pulumi.Input['PrivateEndpointPropertyArgs']]:
"""
Private endpoint which the connection belongs to.
"""
return pulumi.get(self, "private_endpoint")
@private_endpoint.setter
def private_endpoint(self, value: Optional[pulumi.Input['PrivateEndpointPropertyArgs']]):
pulumi.set(self, "private_endpoint", value)
@property
@pulumi.getter(name="privateEndpointConnectionName")
def private_endpoint_connection_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the private endpoint connection.
"""
return pulumi.get(self, "private_endpoint_connection_name")
@private_endpoint_connection_name.setter
def private_endpoint_connection_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_endpoint_connection_name", value)
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> Optional[pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs']]:
"""
Connection State of the Private Endpoint Connection.
"""
return pulumi.get(self, "private_link_service_connection_state")
@private_link_service_connection_state.setter
def private_link_service_connection_state(self, value: Optional[pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs']]):
pulumi.set(self, "private_link_service_connection_state", value)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[pulumi.Input[str]]:
"""
Provisioning state of the private endpoint.
"""
return pulumi.get(self, "provisioning_state")
@provisioning_state.setter
def provisioning_state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "provisioning_state", value)
class PrivateEndpointConnection(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']]] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
A private endpoint connection
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: Cosmos DB database account name.
:param pulumi.Input[str] group_id: Group id of the private endpoint.
:param pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']] private_endpoint: Private endpoint which the connection belongs to.
:param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection.
:param pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']] private_link_service_connection_state: Connection State of the Private Endpoint Connection.
:param pulumi.Input[str] provisioning_state: Provisioning state of the private endpoint.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PrivateEndpointConnectionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A private endpoint connection
:param str resource_name: The name of the resource.
:param PrivateEndpointConnectionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PrivateEndpointConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']]] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PrivateEndpointConnectionArgs.__new__(PrivateEndpointConnectionArgs)
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__.__dict__["account_name"] = account_name
__props__.__dict__["group_id"] = group_id
__props__.__dict__["private_endpoint"] = private_endpoint
__props__.__dict__["private_endpoint_connection_name"] = private_endpoint_connection_name
__props__.__dict__["private_link_service_connection_state"] = private_link_service_connection_state
__props__.__dict__["provisioning_state"] = provisioning_state
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:documentdb/v20210615:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20190801preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20190801preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210115:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210115:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210301preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210301preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210315:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210315:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210401preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210401preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210415:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210415:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210515:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210515:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210701preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210701preview:PrivateEndpointConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PrivateEndpointConnection, __self__).__init__(
'azure-native:documentdb/v20210615:PrivateEndpointConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateEndpointConnection':
"""
Get an existing PrivateEndpointConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = PrivateEndpointConnectionArgs.__new__(PrivateEndpointConnectionArgs)
__props__.__dict__["group_id"] = None
__props__.__dict__["name"] = None
__props__.__dict__["private_endpoint"] = None
__props__.__dict__["private_link_service_connection_state"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
return PrivateEndpointConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> pulumi.Output[Optional[str]]:
"""
Group id of the private endpoint.
"""
return pulumi.get(self, "group_id")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> pulumi.Output[Optional['outputs.PrivateEndpointPropertyResponse']]:
"""
Private endpoint which the connection belongs to.
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> pulumi.Output[Optional['outputs.PrivateLinkServiceConnectionStatePropertyResponse']]:
"""
Connection State of the Private Endpoint Connection.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
Provisioning state of the private endpoint.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
|
def __init__(__self__, *,
account_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
group_id: Optional[pulumi.Input[str]] = None,
private_endpoint: Optional[pulumi.Input['PrivateEndpointPropertyArgs']] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs']] = None,
provisioning_state: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a PrivateEndpointConnection resource.
:param pulumi.Input[str] account_name: Cosmos DB database account name.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] group_id: Group id of the private endpoint.
:param pulumi.Input['PrivateEndpointPropertyArgs'] private_endpoint: Private endpoint which the connection belongs to.
:param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection.
:param pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs'] private_link_service_connection_state: Connection State of the Private Endpoint Connection.
:param pulumi.Input[str] provisioning_state: Provisioning state of the private endpoint.
"""
pulumi.set(__self__, "account_name", account_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if group_id is not None:
pulumi.set(__self__, "group_id", group_id)
if private_endpoint is not None:
pulumi.set(__self__, "private_endpoint", private_endpoint)
if private_endpoint_connection_name is not None:
pulumi.set(__self__, "private_endpoint_connection_name", private_endpoint_connection_name)
if private_link_service_connection_state is not None:
pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
| 17
| 46
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['PrivateEndpointConnectionArgs', 'PrivateEndpointConnection']
@pulumi.input_type
class PrivateEndpointConnectionArgs:
def __init__(__self__, *,
account_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
group_id: Optional[pulumi.Input[str]] = None,
private_endpoint: Optional[pulumi.Input['PrivateEndpointPropertyArgs']] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs']] = None,
provisioning_state: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a PrivateEndpointConnection resource.
:param pulumi.Input[str] account_name: Cosmos DB database account name.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] group_id: Group id of the private endpoint.
:param pulumi.Input['PrivateEndpointPropertyArgs'] private_endpoint: Private endpoint which the connection belongs to.
:param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection.
:param pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs'] private_link_service_connection_state: Connection State of the Private Endpoint Connection.
:param pulumi.Input[str] provisioning_state: Provisioning state of the private endpoint.
"""
pulumi.set(__self__, "account_name", account_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if group_id is not None:
pulumi.set(__self__, "group_id", group_id)
if private_endpoint is not None:
pulumi.set(__self__, "private_endpoint", private_endpoint)
if private_endpoint_connection_name is not None:
pulumi.set(__self__, "private_endpoint_connection_name", private_endpoint_connection_name)
if private_link_service_connection_state is not None:
pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> pulumi.Input[str]:
"""
Cosmos DB database account name.
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> Optional[pulumi.Input[str]]:
"""
Group id of the private endpoint.
"""
return pulumi.get(self, "group_id")
@group_id.setter
def group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_id", value)
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> Optional[pulumi.Input['PrivateEndpointPropertyArgs']]:
"""
Private endpoint which the connection belongs to.
"""
return pulumi.get(self, "private_endpoint")
@private_endpoint.setter
def private_endpoint(self, value: Optional[pulumi.Input['PrivateEndpointPropertyArgs']]):
pulumi.set(self, "private_endpoint", value)
@property
@pulumi.getter(name="privateEndpointConnectionName")
def private_endpoint_connection_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the private endpoint connection.
"""
return pulumi.get(self, "private_endpoint_connection_name")
@private_endpoint_connection_name.setter
def private_endpoint_connection_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_endpoint_connection_name", value)
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> Optional[pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs']]:
"""
Connection State of the Private Endpoint Connection.
"""
return pulumi.get(self, "private_link_service_connection_state")
@private_link_service_connection_state.setter
def private_link_service_connection_state(self, value: Optional[pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs']]):
pulumi.set(self, "private_link_service_connection_state", value)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[pulumi.Input[str]]:
"""
Provisioning state of the private endpoint.
"""
return pulumi.get(self, "provisioning_state")
@provisioning_state.setter
def provisioning_state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "provisioning_state", value)
class PrivateEndpointConnection(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']]] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
A private endpoint connection
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: Cosmos DB database account name.
:param pulumi.Input[str] group_id: Group id of the private endpoint.
:param pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']] private_endpoint: Private endpoint which the connection belongs to.
:param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection.
:param pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']] private_link_service_connection_state: Connection State of the Private Endpoint Connection.
:param pulumi.Input[str] provisioning_state: Provisioning state of the private endpoint.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PrivateEndpointConnectionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A private endpoint connection
:param str resource_name: The name of the resource.
:param PrivateEndpointConnectionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PrivateEndpointConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']]] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PrivateEndpointConnectionArgs.__new__(PrivateEndpointConnectionArgs)
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__.__dict__["account_name"] = account_name
__props__.__dict__["group_id"] = group_id
__props__.__dict__["private_endpoint"] = private_endpoint
__props__.__dict__["private_endpoint_connection_name"] = private_endpoint_connection_name
__props__.__dict__["private_link_service_connection_state"] = private_link_service_connection_state
__props__.__dict__["provisioning_state"] = provisioning_state
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:documentdb/v20210615:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20190801preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20190801preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210115:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210115:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210301preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210301preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210315:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210315:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210401preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210401preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210415:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210415:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210515:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210515:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210701preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210701preview:PrivateEndpointConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PrivateEndpointConnection, __self__).__init__(
'azure-native:documentdb/v20210615:PrivateEndpointConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateEndpointConnection':
"""
Get an existing PrivateEndpointConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = PrivateEndpointConnectionArgs.__new__(PrivateEndpointConnectionArgs)
__props__.__dict__["group_id"] = None
__props__.__dict__["name"] = None
__props__.__dict__["private_endpoint"] = None
__props__.__dict__["private_link_service_connection_state"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
return PrivateEndpointConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> pulumi.Output[Optional[str]]:
"""
Group id of the private endpoint.
"""
return pulumi.get(self, "group_id")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> pulumi.Output[Optional['outputs.PrivateEndpointPropertyResponse']]:
"""
Private endpoint which the connection belongs to.
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> pulumi.Output[Optional['outputs.PrivateLinkServiceConnectionStatePropertyResponse']]:
"""
Connection State of the Private Endpoint Connection.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
Provisioning state of the private endpoint.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
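

# --- Illustrative usage sketch (not part of the generated SDK). A minimal
# Pulumi program might create this resource roughly as below; the resource
# group, account name, and connection-state field values are assumptions for
# illustration, not taken from the provider docs.
def _example_usage():  # hypothetical helper, for illustration only
    connection = PrivateEndpointConnection(
        "examplePrivateEndpointConnection",
        resource_group_name="example-rg",  # assumed existing resource group
        account_name="example-account",    # assumed existing Cosmos DB account
        private_link_service_connection_state=PrivateLinkServiceConnectionStatePropertyArgs(
            status="Approved",
            description="Approved via Pulumi",
        ),
    )
    # Export the provisioning state so it appears in `pulumi stack output`.
    pulumi.export("provisioning_state", connection.provisioning_state)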
|
get
|
Get an existing PrivateEndpointConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['PrivateEndpointConnectionArgs', 'PrivateEndpointConnection']
@pulumi.input_type
class PrivateEndpointConnectionArgs:
def __init__(__self__, *,
account_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
group_id: Optional[pulumi.Input[str]] = None,
private_endpoint: Optional[pulumi.Input['PrivateEndpointPropertyArgs']] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs']] = None,
provisioning_state: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a PrivateEndpointConnection resource.
:param pulumi.Input[str] account_name: Cosmos DB database account name.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] group_id: Group id of the private endpoint.
:param pulumi.Input['PrivateEndpointPropertyArgs'] private_endpoint: Private endpoint which the connection belongs to.
:param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection.
:param pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs'] private_link_service_connection_state: Connection State of the Private Endpoint Connection.
:param pulumi.Input[str] provisioning_state: Provisioning state of the private endpoint.
"""
pulumi.set(__self__, "account_name", account_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if group_id is not None:
pulumi.set(__self__, "group_id", group_id)
if private_endpoint is not None:
pulumi.set(__self__, "private_endpoint", private_endpoint)
if private_endpoint_connection_name is not None:
pulumi.set(__self__, "private_endpoint_connection_name", private_endpoint_connection_name)
if private_link_service_connection_state is not None:
pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> pulumi.Input[str]:
"""
Cosmos DB database account name.
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> Optional[pulumi.Input[str]]:
"""
Group id of the private endpoint.
"""
return pulumi.get(self, "group_id")
@group_id.setter
def group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_id", value)
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> Optional[pulumi.Input['PrivateEndpointPropertyArgs']]:
"""
Private endpoint which the connection belongs to.
"""
return pulumi.get(self, "private_endpoint")
@private_endpoint.setter
def private_endpoint(self, value: Optional[pulumi.Input['PrivateEndpointPropertyArgs']]):
pulumi.set(self, "private_endpoint", value)
@property
@pulumi.getter(name="privateEndpointConnectionName")
def private_endpoint_connection_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the private endpoint connection.
"""
return pulumi.get(self, "private_endpoint_connection_name")
@private_endpoint_connection_name.setter
def private_endpoint_connection_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_endpoint_connection_name", value)
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> Optional[pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs']]:
"""
Connection State of the Private Endpoint Connection.
"""
return pulumi.get(self, "private_link_service_connection_state")
@private_link_service_connection_state.setter
def private_link_service_connection_state(self, value: Optional[pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs']]):
pulumi.set(self, "private_link_service_connection_state", value)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[pulumi.Input[str]]:
"""
Provisioning state of the private endpoint.
"""
return pulumi.get(self, "provisioning_state")
@provisioning_state.setter
def provisioning_state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "provisioning_state", value)
class PrivateEndpointConnection(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']]] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
A private endpoint connection
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: Cosmos DB database account name.
:param pulumi.Input[str] group_id: Group id of the private endpoint.
:param pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']] private_endpoint: Private endpoint which the connection belongs to.
:param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection.
:param pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']] private_link_service_connection_state: Connection State of the Private Endpoint Connection.
:param pulumi.Input[str] provisioning_state: Provisioning state of the private endpoint.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PrivateEndpointConnectionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A private endpoint connection
:param str resource_name: The name of the resource.
:param PrivateEndpointConnectionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PrivateEndpointConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']]] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PrivateEndpointConnectionArgs.__new__(PrivateEndpointConnectionArgs)
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__.__dict__["account_name"] = account_name
__props__.__dict__["group_id"] = group_id
__props__.__dict__["private_endpoint"] = private_endpoint
__props__.__dict__["private_endpoint_connection_name"] = private_endpoint_connection_name
__props__.__dict__["private_link_service_connection_state"] = private_link_service_connection_state
__props__.__dict__["provisioning_state"] = provisioning_state
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:documentdb/v20210615:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20190801preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20190801preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210115:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210115:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210301preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210301preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210315:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210315:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210401preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210401preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210415:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210415:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210515:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210515:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210701preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210701preview:PrivateEndpointConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PrivateEndpointConnection, __self__).__init__(
'azure-native:documentdb/v20210615:PrivateEndpointConnection',
resource_name,
__props__,
opts)
# MASKED: get function (lines 223-245)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> pulumi.Output[Optional[str]]:
"""
Group id of the private endpoint.
"""
return pulumi.get(self, "group_id")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> pulumi.Output[Optional['outputs.PrivateEndpointPropertyResponse']]:
"""
Private endpoint which the connection belongs to.
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> pulumi.Output[Optional['outputs.PrivateLinkServiceConnectionStatePropertyResponse']]:
"""
Connection State of the Private Endpoint Connection.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
Provisioning state of the private endpoint.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
|
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateEndpointConnection':
"""
Get an existing PrivateEndpointConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = PrivateEndpointConnectionArgs.__new__(PrivateEndpointConnectionArgs)
__props__.__dict__["group_id"] = None
__props__.__dict__["name"] = None
__props__.__dict__["private_endpoint"] = None
__props__.__dict__["private_link_service_connection_state"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
return PrivateEndpointConnection(resource_name, opts=opts, __props__=__props__)
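# --- Illustrative usage sketch (comments only): `get` adopts an existing
# connection by its provider ID; the resource ID below is a made-up placeholder.
#
#     existing = PrivateEndpointConnection.get(
#         "importedConnection",
#         id="/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.DocumentDB"
#            "/databaseAccounts/<account>/privateEndpointConnections/<name>",
#     )
#     pulumi.export("state", existing.provisioning_state)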
| 223
| 245
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['PrivateEndpointConnectionArgs', 'PrivateEndpointConnection']
@pulumi.input_type
class PrivateEndpointConnectionArgs:
def __init__(__self__, *,
account_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
group_id: Optional[pulumi.Input[str]] = None,
private_endpoint: Optional[pulumi.Input['PrivateEndpointPropertyArgs']] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs']] = None,
provisioning_state: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a PrivateEndpointConnection resource.
:param pulumi.Input[str] account_name: Cosmos DB database account name.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] group_id: Group id of the private endpoint.
:param pulumi.Input['PrivateEndpointPropertyArgs'] private_endpoint: Private endpoint which the connection belongs to.
:param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection.
:param pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs'] private_link_service_connection_state: Connection State of the Private Endpoint Connection.
:param pulumi.Input[str] provisioning_state: Provisioning state of the private endpoint.
"""
pulumi.set(__self__, "account_name", account_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if group_id is not None:
pulumi.set(__self__, "group_id", group_id)
if private_endpoint is not None:
pulumi.set(__self__, "private_endpoint", private_endpoint)
if private_endpoint_connection_name is not None:
pulumi.set(__self__, "private_endpoint_connection_name", private_endpoint_connection_name)
if private_link_service_connection_state is not None:
pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> pulumi.Input[str]:
"""
Cosmos DB database account name.
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> Optional[pulumi.Input[str]]:
"""
Group id of the private endpoint.
"""
return pulumi.get(self, "group_id")
@group_id.setter
def group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "group_id", value)
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> Optional[pulumi.Input['PrivateEndpointPropertyArgs']]:
"""
Private endpoint which the connection belongs to.
"""
return pulumi.get(self, "private_endpoint")
@private_endpoint.setter
def private_endpoint(self, value: Optional[pulumi.Input['PrivateEndpointPropertyArgs']]):
pulumi.set(self, "private_endpoint", value)
@property
@pulumi.getter(name="privateEndpointConnectionName")
def private_endpoint_connection_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the private endpoint connection.
"""
return pulumi.get(self, "private_endpoint_connection_name")
@private_endpoint_connection_name.setter
def private_endpoint_connection_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_endpoint_connection_name", value)
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> Optional[pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs']]:
"""
Connection State of the Private Endpoint Connection.
"""
return pulumi.get(self, "private_link_service_connection_state")
@private_link_service_connection_state.setter
def private_link_service_connection_state(self, value: Optional[pulumi.Input['PrivateLinkServiceConnectionStatePropertyArgs']]):
pulumi.set(self, "private_link_service_connection_state", value)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[pulumi.Input[str]]:
"""
Provisioning state of the private endpoint.
"""
return pulumi.get(self, "provisioning_state")
@provisioning_state.setter
def provisioning_state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "provisioning_state", value)
class PrivateEndpointConnection(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']]] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
A private endpoint connection
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: Cosmos DB database account name.
:param pulumi.Input[str] group_id: Group id of the private endpoint.
:param pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']] private_endpoint: Private endpoint which the connection belongs to.
:param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection.
:param pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']] private_link_service_connection_state: Connection State of the Private Endpoint Connection.
:param pulumi.Input[str] provisioning_state: Provisioning state of the private endpoint.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PrivateEndpointConnectionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A private endpoint connection
:param str resource_name: The name of the resource.
:param PrivateEndpointConnectionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PrivateEndpointConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
group_id: Optional[pulumi.Input[str]] = None,
private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']]] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PrivateEndpointConnectionArgs.__new__(PrivateEndpointConnectionArgs)
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__.__dict__["account_name"] = account_name
__props__.__dict__["group_id"] = group_id
__props__.__dict__["private_endpoint"] = private_endpoint
__props__.__dict__["private_endpoint_connection_name"] = private_endpoint_connection_name
__props__.__dict__["private_link_service_connection_state"] = private_link_service_connection_state
__props__.__dict__["provisioning_state"] = provisioning_state
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:documentdb/v20210615:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20190801preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20190801preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210115:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210115:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210301preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210301preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210315:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210315:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210401preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210401preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210415:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210415:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210515:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210515:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:documentdb/v20210701preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:documentdb/v20210701preview:PrivateEndpointConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PrivateEndpointConnection, __self__).__init__(
'azure-native:documentdb/v20210615:PrivateEndpointConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateEndpointConnection':
"""
Get an existing PrivateEndpointConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = PrivateEndpointConnectionArgs.__new__(PrivateEndpointConnectionArgs)
__props__.__dict__["group_id"] = None
__props__.__dict__["name"] = None
__props__.__dict__["private_endpoint"] = None
__props__.__dict__["private_link_service_connection_state"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
return PrivateEndpointConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> pulumi.Output[Optional[str]]:
"""
Group id of the private endpoint.
"""
return pulumi.get(self, "group_id")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> pulumi.Output[Optional['outputs.PrivateEndpointPropertyResponse']]:
"""
Private endpoint which the connection belongs to.
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> pulumi.Output[Optional['outputs.PrivateLinkServiceConnectionStatePropertyResponse']]:
"""
Connection State of the Private Endpoint Connection.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
Provisioning state of the private endpoint.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
|
__init__
|
The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
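# --- Illustrative sketch (not part of the client): both AuthBase subclasses
# plug straight into `requests`; the URL and token values are placeholders.
def _auth_demo():  # hypothetical helper, for illustration only
    # Bearer (JWT) authentication:
    requests.get("https://127.0.0.1:6363/api/info", auth=JWTAuth("<jwt-token>"))
    # API-token authentication:
    requests.get("https://127.0.0.1:6363/api/info", auth=APITokenAuth("<api-token>"))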
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
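            # Recursively collect the "@after" value of every SwapValue
            # operation, yielding the post-patch view of the document.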
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
    def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
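            # Recursively collect every "@before" value, yielding the
            # pre-patch view of the document.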
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
    def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
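

# --- Illustrative sketch (not part of the client): how `Patch` splits a
# TerminusDB diff into its before/after views. The diff shape follows the
# "SwapValue" operation handled above; the document values are made up.
def _patch_demo():  # hypothetical helper, for illustration only
    diff = json.dumps({"name": {"@op": "SwapValue", "@before": "Jane", "@after": "Janine"}})
    patch = Patch(json=diff)
    assert patch.before == {"name": "Jane"}    # document as it was
    assert patch.update == {"name": "Janine"}  # document as patched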
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
    server_url: str
        URL of the server that this client is connected to.
    api: str
        API endpoint for this client.
    team: str
        Team that this client is using. "admin" for local dbs.
    db: str
        Database that this client is connected to.
    user: str
        TerminusDB user that this client is using. "admin" for local dbs.
    branch: str
        Branch of the database that this client is connected to. Defaults to "main".
    ref: str, None
        Ref setting for the client. Defaults to None.
    repo: str
        Repo identifier of the database that this client is connected to. Defaults to "local".
    """
# MASKED: __init__ function (lines 157-178)
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
            return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
            raise ValueError("ref can only be 'local', 'remote' or None")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: str = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
        Stores the connection settings and necessary metadata for the connected server. You need to connect before most database operations.
Parameters
----------
        team: str
            Name of the team. Defaults to "admin".
        db: optional, str
            Name of the database to connect to.
        remote_auth: optional, str
            Remote Auth setting.
        key: optional, str
            API key for connecting. Defaults to "root".
        user: optional, str
            Name of the user. Defaults to "admin".
        use_token: bool
            Use a token to connect. If neither `jwt_token` nor `api_token` is provided (None), the environment variable TERMINUSDB_ACCESS_TOKEN will be used as the API token.
        jwt_token: optional, str
            The Bearer JWT token to connect with. Defaults to None.
        api_token: optional, str
            The API token to connect with. Defaults to None.
        branch: optional, str
            Branch to connect to. Defaults to "main".
        ref: optional, str
            Ref setting.
        repo: optional, str
            Local or remote repo. Defaults to "local".
        \**kwargs
            Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
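        # Verify connectivity and credentials by fetching the server info document.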
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
"""Undo connect and close the connection.
        The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
        Commit history, including the commit id, author, message, and commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the following format:
        {"commit_id":
            {"author": "commit_author",
             "message": "commit_message",
             "timestamp": <datetime object of the timestamp>}
        }
Parameters
----------
max_history: int, optional
            Maximum number of commits to return, counting backwards from the current commit. Defaults to 500. It needs to be non-negative; if the input is 0 it will still return the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363"
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
        [{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 34)},
         {'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'message': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': 'rciy1rfu5foj67ch00ow6f6nnjjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gavin@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': '9egc4h0m36l5rbq1alr1fki6jbfukuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
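        # Rewrite the documents URL so it points at this database's _commits graph.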
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
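    # --- Illustrative sketch (comments only): listing branches once connected;
    # the server URL and database name are placeholders.
    #
    #     client = WOQLClient("https://127.0.0.1:6363")
    #     client.connect(db="example_db")
    #     branches = client.get_all_branches()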
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
            TerminusDB currently does not support open transactions, so this method is not applicable. To reset the commit head, use WOQLClient.reset instead.
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
            Database identifier to set in the config.
        team : str
            Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/branch/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force : bool
If True, force the deletion of the database.
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
# TODO: make triples work again
raise InterfaceError("get_triples is temporarily not available in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
# TODO: make triples work again
raise InterfaceError(
"update_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
# TODO: make triples work again
raise InterfaceError(
"insert_triples is temporarily not available in this version"
)
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
Template for the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, default to be 0
count: int or None
The maximum number of returned results; if None (default), all available results are returned.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If True, the data version of the document(s) is obtained and the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
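Examples
--------
A minimal sketch, assuming a connected client and documents of type
"Person" with a "name" property (the names here are illustrative):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> matches = client.query_document({"@type": "Person", "name": "Jane"}, as_list=True)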
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
IRI id of the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
If True, the data version of the document(s) is obtained and the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
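Examples
--------
A minimal sketch, assuming a connected client and an existing document
id (the id here is illustrative):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> doc = client.get_document("Person/Jane")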
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
Specific type of the documents that are being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, default to be 0
count: int or None
The maximum number of returned results; if None (default), all available results are returned.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If True, the data version of the document(s) is obtained and the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
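Examples
--------
A minimal sketch, assuming a connected client and a schema containing a
"Person" class (the class name here is illustrative):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> people = client.get_documents_by_type("Person", as_list=True)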
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, default to be 0
count: int or None
The maximum number of returned results; if None (default), all available results are returned.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If True, the data version of the document(s) is obtained and the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
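Examples
--------
A minimal sketch, assuming a connected client:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> documents = client.get_all_documents(as_list=True)
>>> first_ten = list(client.get_all_documents(count=10))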
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
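# Recursively yield every value stored under `target_key` in a nested
# dict/list structure (used here to collect "@ref" references).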
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_dcoument(self, document, graph_type):
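# Convert the given document(s) to plain dict(s) and verify that every
# "@ref" reference in the batch has a matching "@capture".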
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "instance" or "schema".
full_replace : bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
list of ids of the inserted documents
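Examples
--------
A minimal sketch, assuming a connected client and a schema containing a
"Person" class (the document shown is illustrative):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.insert_document({"@type": "Person", "name": "Jane"}, commit_msg="Adding Jane")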
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
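Examples
--------
A minimal sketch, assuming a connected client and an existing "Person"
document (the document shown is illustrative):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.replace_document({"@id": "Person/Jane", "@type": "Person", "name": "Janine"}, commit_msg="Update Jane")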
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
document: str or list of str
Document(s) (as dictionary or DocumentTemplate objects) or id(s) of document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
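Examples
--------
A minimal sketch, assuming a connected client and an existing document
id (the id here is illustrative):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.delete_document("Person/Jane", commit_msg="Delete Jane")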
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
Returns
-------
bool
True if the document exists
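Examples
--------
A minimal sketch, assuming a connected client (the id here is illustrative):
>>> client.has_doc("Person/Jane")
True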
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
Returns
-------
dict
Dictionary containing the class frame information
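Examples
--------
A minimal sketch, assuming the schema contains a "Person" class (the
class name here is illustrative):
>>> client.get_class_frame("Person")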
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
If the data version of the query result(s) should be obtained. If True, the method return the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
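Examples
--------
A minimal sketch, assuming a connected client (the branch name is illustrative):
>>> client.create_branch("my_new_branch")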
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
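Examples
--------
A minimal sketch, assuming the branch was created earlier (the branch name is illustrative):
>>> client.delete_branch("my_new_branch")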
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database
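Examples
--------
A minimal sketch, assuming a remote named "origin" has been configured
for this database (the remote name is illustrative):
>>> client.fetch("origin")
"""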
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").push(remote="origin", remote_branch="main", author="admin", message="commit message")
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
branch : str, optional
the branch for the rebase
commit : str, optional
the commit id for the rebase
rebase_source : str, optional
the source branch for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when need to go back after a soft reset).
soft: bool
Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting the backend to a previous commit and wiping newer commits.
use_path : bool
Whether the commit given is an id or a path. Default is an id (use_path is False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id to be reset
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_dcoument(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json()
'{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id.startswith("terminusdb:///data"):
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjunction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
Do not connect when using the public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[str]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of str
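Examples
--------
A minimal sketch, assuming a connected client (the database names shown are illustrative):
>>> client.list_databases()
['database1', 'database2']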
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
| 157
| 178
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
URL of the server that this client is connected to.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
TerminusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Default to "main".
ref: str, None
Ref setting for the client. Default to None.
repo: str
Repo identifier of the database that this client is connected to. Default to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: Optional[str] = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, default to be "admin"
db: optional, str
Name of the database to connect to
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, default to be "root"
user: optional, str
Name of the user, default to be "admin"
use_token: bool
Use token to connect. If both `jwt_token` and `api_token` are not provided (None), then it will use the ENV variable TERMINUSDB_ACCESS_TOKEN to connect as the API token
jwt_token: optional, str
The Bearer JWT token to connect. Default to be None.
api_token: optional, str
The API token to connect. Default to be None.
branch: optional, str
Branch to be connected, default to be "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, default to be "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection failed: {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
"""Undo connect and close the connection.
The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
Commit history - Commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the following format:
{"commit_id":
{"author": "commit_author",
"message": "commit_message",
"timestamp: <datetime object of the timestamp>" }
}
Parameters
----------
max_history: int, optional
maximum number of commits to return, counting backwards from your current commit. Default is 500. It needs to be non-negative; if the input is 0 it will still return the last commit.
Examples
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.da
tetime(2020, 9, 3, 15, 29, 34)}, {'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'm
essage': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': 'rciy1rfu5foj67ch00ow6f6n
njjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15,
29, 33)}, {'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', '
timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gav
in@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)
}, {'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account
object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': '9egc4h0m36l5rbq1alr1fki6jbfu
kuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15,
29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
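"""Return the commit id at the HEAD of the current branch, or None if the branch has no commits."""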
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
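"""Return the commit id that is `step` commits before the HEAD of the current branch."""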
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
Since TerminusDB currently does not support open transactions. This method is not applicable to it's usage. To reset commit head, use WOQLClient.reset
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
        dbid : str
            Database identifier to set in the config.
        team : str, optional
            Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
            the team in which the database resides (defaults to "admin")
        force: bool
            whether to force the deletion of the database (defaults to False)
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
        ### TODO: make triples work again
        raise InterfaceError("get_triples is temporarily not available in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
        ### TODO: make triples work again
        raise InterfaceError(
            "update_triples is temporarily not available in this version"
        )
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
        ### TODO: make triples work again
        raise InterfaceError(
            "insert_triples is temporarily not available in this version"
        )
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
        document_template : dict
            Template for the documents being retrieved
        graph_type : str, optional
            Graph type, either "instance" or "schema".
        skip: int
            The starting position of the returned results (default 0)
        count: int or None
            The maximum number of results returned; if None (default), all matching results are returned.
        as_list: bool
            If True, the result is returned as a list rather than an iterator.
        get_data_version: bool
            If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
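        Examples
        --------
        A minimal sketch (not from the original docs); the ``Person`` class
        and its fields are hypothetical:

        >>> client = WOQLClient("https://127.0.0.1:6363/")
        >>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> matches = client.query_document({"@type": "Person", "age": 42}, as_list=True)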
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
        iri_id : str
            IRI id of the document being retrieved
        graph_type : str, optional
            Graph type, either "instance" or "schema".
        get_data_version: bool
            If the data version of the document should be obtained. If True, the method returns the result and the version as a tuple.
        kwargs:
            Additional boolean flags for retrieval. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
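        Examples
        --------
        A minimal sketch (not from the original docs); ``Person/Jane`` is a
        hypothetical document id:

        >>> client = WOQLClient("https://127.0.0.1:6363/")
        >>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> doc = client.get_document("Person/Jane")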
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
        doc_type : str
            Specific type of the documents being retrieved
        graph_type : str, optional
            Graph type, either "instance" or "schema".
        skip: int
            The starting position of the returned results (default 0)
        count: int or None
            The maximum number of results returned; if None (default), all available results are returned.
        as_list: bool
            If True, the result is returned as a list rather than an iterator.
        get_data_version: bool
            If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
        kwargs:
            Additional boolean flags for retrieval. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
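        Examples
        --------
        A minimal sketch (not from the original docs); ``Person`` is a
        hypothetical class name:

        >>> client = WOQLClient("https://127.0.0.1:6363/")
        >>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> people = client.get_documents_by_type("Person", count=10, as_list=True)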
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
        skip: int
            The starting position of the returned results (default 0)
        count: int or None
            The maximum number of results returned; if None (default), all available results are returned.
        as_list: bool
            If True, the result is returned as a list rather than an iterator.
        get_data_version: bool
            If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
        kwargs:
            Additional boolean flags for retrieval. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
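        Examples
        --------
        A minimal sketch (not from the original docs); fetches the first ten
        documents of the instance graph as a list:

        >>> client = WOQLClient("https://127.0.0.1:6363/")
        >>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> docs = client.get_all_documents(skip=0, count=10, as_list=True)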
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_dcoument(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
        graph_type : str
            Graph type, either "instance" or "schema".
        full_replace : bool
            If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
        commit_msg : str
            Commit message.
        last_data_version : str
            Last version before the update, used to check if the document has been changed unknowingly
        compress : str or int
            If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
        list
            list of ids of the inserted documents
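        Examples
        --------
        A minimal sketch (not from the original docs); the ``Person`` document
        is hypothetical:

        >>> client = WOQLClient("https://127.0.0.1:6363/")
        >>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> client.insert_document({"@type": "Person", "name": "Jane"}, commit_msg="Adding Jane")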
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
        compress : str or int
            If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
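        Examples
        --------
        A minimal sketch (not from the original docs); replaces a hypothetical
        ``Person/Jane`` document wholesale:

        >>> client = WOQLClient("https://127.0.0.1:6363/")
        >>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> client.replace_document(
        ...     {"@id": "Person/Jane", "@type": "Person", "name": "Janine"},
        ...     commit_msg="Rename Jane",
        ... )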
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
        compress : str or int
            If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
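        Examples
        --------
        A minimal sketch (not from the original docs); behaves like
        ``replace_document`` with ``create=True``, so the hypothetical
        ``Person/Jane`` is created if missing:

        >>> client = WOQLClient("https://127.0.0.1:6363/")
        >>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> client.update_document({"@id": "Person/Jane", "@type": "Person", "name": "Jane"})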
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
        document: str, dict, or list
            Document(s) (as dictionaries or DocumentTemplate objects) or id(s) of the document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
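        Examples
        --------
        A minimal sketch (not from the original docs); deletes a hypothetical
        document by id:

        >>> client = WOQLClient("https://127.0.0.1:6363/")
        >>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> client.delete_document("Person/Jane", commit_msg="Removing Jane")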
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
        Returns
        -------
        bool
            True if the document exists
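        Examples
        --------
        A minimal sketch (not from the original docs); checks a hypothetical
        document id:

        >>> client = WOQLClient("https://127.0.0.1:6363/")
        >>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> client.has_doc("Person/Jane")
        True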
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
        Returns
        -------
        dict
            Dictionary containing information about the class
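        Examples
        --------
        A minimal sketch (not from the original docs); ``Person`` is a
        hypothetical class name:

        >>> client = WOQLClient("https://127.0.0.1:6363/")
        >>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> frame = client.get_class_frame("Person")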
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
        commit_msg : str
            A message that will be written to the commit log to describe the change
get_data_version: bool
            If the data version of the query result(s) should be obtained. If True, the method returns the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
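        Examples
        --------
        A minimal sketch (not from the original docs); branches off the
        current commit:

        >>> client = WOQLClient("https://127.0.0.1:6363/")
        >>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> client.create_branch("dev")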
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
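        Examples
        --------
        A minimal sketch (not from the original docs):

        >>> client = WOQLClient("https://127.0.0.1:6363/")
        >>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> client.delete_branch("dev")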
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
        remote_branch: str, optional
            remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
        author: str, optional
            option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self.author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
        remote_branch: str, optional
            remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
        author: str, optional
            option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
        >>> WOQLClient(server="http://localhost:6363").push(remote="origin", remote_branch="main", author="admin", message="commit message")
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
        branch : str, optional
            the branch for the rebase
        commit : str, optional
            the commit id for the rebase
        rebase_source : str, optional
            the source branch or commit for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when need to go back after a soft reset).
        soft: bool
            Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to it in the backend and wiping out newer commits.
        use_path : bool
            Whether the commit given is an id or a path. Defaults to id (use_path=False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
            commit id of the new squash commit (can be passed to reset)
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_dcoument(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
        >>> result.to_json()
        '{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id[:len("terminusdb:///data")] == "terminusdb:///data":
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
        Do not connect when using the public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
        >>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
        >>> print(result)
        '{ "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
        description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
    def list_databases(self) -> List[str]:
"""
        Returns a list of database names for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
        -------
        list of str
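        Examples
        --------
        A minimal sketch (not from the original docs); the returned names are
        hypothetical:

        >>> client = WOQLClient("https://127.0.0.1:6363/")
        >>> client.connect(user="admin", key="root")
        >>> client.list_databases()
        ['example_db']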
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
return base
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
connect
|
Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, default to be "admin"
db: optional, str
Name of the database connected
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, default to be "root"
user: optional, str
Name of the user, default to be "admin"
use_token: bool
    Use token to connect. If both `jwt_token` and `api_token` are not provided (None), the environment variable TERMINUSDB_ACCESS_TOKEN will be used as the API token
jwt_token: optional, str
The Bearer JWT token to connect. Default to be None.
api_token: optional, str
The API token to connect. Default to be None.
branch: optional, str
Branch to be connected, default to be "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, default to be "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
    def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
    def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
URL of the server that this client connected.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
TerminiusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Default to "main".
ref: str, None
Ref setting for the client. Default to None.
repo: str
Repo identifier of the database that this client is connected to. Default to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
        else:
            return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
# MASKED: connect function (lines 263-359)
def close(self) -> None:
"""Undo connect and close the connection.
        The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
        Commit history - Commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the following format:
{"commit_id":
{"author": "commit_author",
"message": "commit_message",
"timestamp: <datetime object of the timestamp>" }
}
Parameters
----------
        max_history: int, optional
            maximum number of commits to return, counting backwards from your current commit. Default is 500. It needs to be non-negative; if the input is 0 it will still return the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363"
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.da
tetime(2020, 9, 3, 15, 29, 34)}, {'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'm
essage': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': 'rciy1rfu5foj67ch00ow6f6n
njjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15,
29, 33)}, {'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', '
timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gav
in@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)
}, {'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account
object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': '9egc4h0m36l5rbq1alr1fki6jbfu
kuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15,
29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
            Since TerminusDB currently does not support open transactions, this method is not applicable. To reset the commit head, use WOQLClient.reset instead.
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
            Database identifier to set in the config.
        team : str, optional
            Team identifier to set in the config. If not passed in, the current one will be used.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
        force: bool
            If True, force the deletion of the database (defaults to False).
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
        ### TODO: make triples work again
        raise InterfaceError("get_triples is temporarily not available in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
        ### TODO: make triples work again
        raise InterfaceError(
            "update_triples is temporarily not available in this version"
        )
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
        ### TODO: make triples work again
        raise InterfaceError(
            "insert_triples is temporarily not available in this version"
        )
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
            Template for the document that is being retrieved
        graph_type : str, optional
            Graph type, either "instance" or "schema".
        skip: int
            The starting position of the returned results, defaults to 0
        count: int or None
            The maximum number of results returned; if None (default) all of the available results will be returned.
        as_list: bool
            If True, the result is returned as a list rather than an iterator.
        get_data_version: bool
            If True, the data version of the document(s) is obtained and the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
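        Examples
        --------
        A minimal sketch; the ``Person`` type and its ``name`` property are
        illustrative and assume a matching schema:
        >>> client = WOQLClient("https://127.0.0.1:6363/")
        >>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> client.query_document({"@type": "Person", "name": "Jane"}, as_list=True)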
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
            IRI id of the document being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
            If True, the data version of the document(s) is obtained and the method returns the result and the version as a tuple.
kwargs:
            Additional boolean flags for retrieving. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
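        Examples
        --------
        A minimal sketch; the document id ``Person/Jane`` is illustrative:
        >>> client = WOQLClient("https://127.0.0.1:6363/")
        >>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> client.get_document("Person/Jane")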
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
            Specific type of the documents being retrieved
        graph_type : str, optional
            Graph type, either "instance" or "schema".
        skip: int
            The starting position of the returned results, defaults to 0
        count: int or None
            The maximum number of results returned; if None (default) all of the available results will be returned.
        as_list: bool
            If True, the result is returned as a list rather than an iterator.
        get_data_version: bool
            If True, the data version of the document(s) is obtained and the method returns the result and the version as a tuple.
        kwargs:
            Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
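        Examples
        --------
        A minimal sketch; the ``Person`` type is illustrative:
        >>> client = WOQLClient("https://127.0.0.1:6363/")
        >>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> documents = client.get_documents_by_type("Person", as_list=True)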
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
            The starting position of the returned results, defaults to 0
        count: int or None
            The maximum number of results returned; if None (default) all of the available results will be returned.
        as_list: bool
            If True, the result is returned as a list rather than an iterator.
        get_data_version: bool
            If True, the data version of the document(s) is obtained and the method returns the result and the version as a tuple.
        kwargs:
            Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
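        Examples
        --------
        A minimal sketch, assuming a connected client:
        >>> client = WOQLClient("https://127.0.0.1:6363/")
        >>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> documents = client.get_all_documents(count=10, as_list=True)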
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
    def _convert_document(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "inference", "instance" or "schema".
full_replace:: bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
            If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
            list of ids of the inserted documents
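        Examples
        --------
        A minimal sketch; the ``Person`` type is illustrative and assumes a
        matching class in the schema:
        >>> client = WOQLClient("https://127.0.0.1:6363/")
        >>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> client.insert_document({"@type": "Person", "name": "Jane"}, commit_msg="Adding Jane")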
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
        new_doc = self._convert_document(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
            If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
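        Examples
        --------
        A minimal sketch; the document shown is illustrative:
        >>> client = WOQLClient("https://127.0.0.1:6363/")
        >>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> client.replace_document({"@id": "Person/Jane", "@type": "Person", "name": "Janine"}, commit_msg="Rename Jane")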
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
        new_doc = self._convert_document(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
            If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
        document: str or dict, or a list of them
            Document(s) (as dictionary or DocumentTemplate objects) or id(s) of document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
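        Examples
        --------
        A minimal sketch; the document id ``Person/Jane`` is illustrative:
        >>> client = WOQLClient("https://127.0.0.1:6363/")
        >>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> client.delete_document("Person/Jane", commit_msg="Removing Jane")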
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
        Returns
        -------
        bool
            True if the document exists, False otherwise
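        Examples
        --------
        A minimal sketch; the document id ``Person/Jane`` is illustrative:
        >>> client = WOQLClient("https://127.0.0.1:6363/")
        >>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> client.has_doc("Person/Jane")
        True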
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
        Returns
        -------
        dict
            Dictionary containing the class frame information
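        Examples
        --------
        A minimal sketch; the ``Person`` class is illustrative:
        >>> client = WOQLClient("https://127.0.0.1:6363/")
        >>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> frame = client.get_class_frame("Person")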
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
        commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
If the data version of the query result(s) should be obtained. If True, the method return the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
        >>> WOQLClient("http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
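        Examples
        --------
        A minimal sketch, assuming a connected client; the branch name is
        illustrative:
        >>> client = WOQLClient("https://127.0.0.1:6363/")
        >>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> client.create_branch("dev")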
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
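        Examples
        --------
        A minimal sketch, assuming the ``dev`` branch exists:
        >>> client = WOQLClient("https://127.0.0.1:6363/")
        >>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> client.delete_branch("dev")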
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
            remote branch to pull from, defaults to your current branch
        message: str, optional
            optional commit message
        author: str, optional
            option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self.author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
            remote branch to push to, defaults to your current branch
        message: str, optional
            optional commit message
        author: str, optional
            option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
        >>> WOQLClient("http://localhost:6363").push(remote="origin", remote_branch="main", author="admin", message="commit message")
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
        branch : str, optional
            the branch for the rebase
        commit : str, optional
            the commit id for the rebase
        rebase_source : str, optional
            the source branch or commit for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when need to go back after a soft reset).
soft: bool
            Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to it in the backend and wiping newer commits.
        use_path : bool
            Whether the commit given is an id or a path. Defaults to id (use_path is False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
            The id of the new squash commit
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
    def _convert_diff_document(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
        Do not call `connect` first when using the public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
        >>> result.to_json()
        '{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
                request_dict[key] = self._convert_diff_document(item)
if document_id is not None:
if "before_data_version" in request_dict:
                if document_id.startswith("terminusdb:///data"):
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
        Do not call `connect` first when using the public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
        >>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
        >>> print(result)
        '{ "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
        description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
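        Examples
        --------
        A minimal sketch, assuming a connected client; the database name is
        illustrative:
        >>> client = WOQLClient("https://127.0.0.1:6363/")
        >>> client.connect(user="admin", key="root", team="admin")
        >>> client.get_database("some_db")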
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
    def list_databases(self) -> List[str]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
        list of str
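        Examples
        --------
        A minimal sketch, assuming a connected client; the output shown is
        illustrative:
        >>> client = WOQLClient("https://127.0.0.1:6363/")
        >>> client.connect(user="admin", key="root", team="admin")
        >>> client.list_databases()
        ['some_db', 'another_db']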
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
        remote_auth: Optional[str] = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, default to be "admin"
db: optional, str
Name of the database connected
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, default to be "root"
user: optional, str
Name of the user, default to be "admin"
use_token: bool
            Use token to connect. If neither `jwt_token` nor `api_token` is provided (None), the environment variable TERMINUSDB_ACCESS_TOKEN will be used as the API token
jwt_token: optional, str
The Bearer JWT token to connect. Default to be None.
        api_token: optional, str
The API token to connect. Default to be None.
branch: optional, str
Branch to be connected, default to be "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, default to be "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
| 263
| 359
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
def update(self):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
def before(self):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
URL of the server that this client connected.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
TerminiusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Default to "main".
ref: str, None
Ref setting for the client. Default to None.
repo: str
Repo identifier of the database that this client is connected to. Default to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: str = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, default to be "admin"
db: optional, str
Name of the database connected
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, default to be "root"
user: optional, str
Name of the user, default to be "admin"
use_token: bool
Use token to connect. If both `jwt_token` and `api_token` is not provided (None), then it will use the ENV variable TERMINUSDB_ACCESS_TOKEN to connect as the API token
jwt_token: optional, str
The Bearer JWT token to connect. Default to be None.
api_token: optional, strs
The API token to connect. Default to be None.
branch: optional, str
Branch to be connected, default to be "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, default to be "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
"""Undo connect and close the connection.
The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is call again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
Commit history - Commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the follow format:
{"commit_id":
{"author": "commit_author",
"message": "commit_message",
"timestamp: <datetime object of the timestamp>" }
}
Parameters
----------
max_history: int, optional
maximum number of commit that would return, counting backwards from your current commit. Default is set to 500. It need to be nop-negitive, if input is 0 it will still give the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363"
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.da
tetime(2020, 9, 3, 15, 29, 34)}, {'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'm
essage': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': 'rciy1rfu5foj67ch00ow6f6n
njjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15,
29, 33)}, {'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', '
timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gav
in@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)
}, {'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account
object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': '9egc4h0m36l5rbq1alr1fki6jbfu
kuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15,
29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
Since TerminusDB currently does not support open transactions. This method is not applicable to it's usage. To reset commit head, use WOQLClient.reset
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
Database identifier to set in the config.
team : str
Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force: bool
if True, force the deletion of the database (defaults to False)
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
### TODO: make triples works again
raise InterfaceError("get_triples is temporary not avaliable in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples works again
raise InterfaceError(
"update_triples is temporary not avaliable in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples works again
raise InterfaceError(
"insert_triples is temporary not avaliable in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
Template for the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If True, the data version of the document(s) is obtained and the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
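Examples
--------
A minimal sketch (the database name and document template below are illustrative assumptions):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> matches = client.query_document({"@type": "Person"}, as_list=True)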
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
IRI id of the document being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
If True, the data version of the document(s) is obtained and the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
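Examples
--------
A minimal sketch (the document id below is an illustrative assumption):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> doc = client.get_document("Person/Jane")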
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
Specific type of the documents being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, default 0
count: int or None
The maximum number of results to return; if None (default) all of the available results will be returned.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If True, the data version of the document(s) is obtained and the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
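Examples
--------
A minimal sketch (assuming a schema with a Person class; names here are illustrative):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> people = client.get_documents_by_type("Person", count=10, as_list=True)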
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, default 0
count: int or None
The maximum number of results to return; if None (default) all of the available results will be returned.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If True, the data version of the document(s) is obtained and the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
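Examples
--------
A minimal sketch (the database name is an illustrative assumption):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> docs = client.get_all_documents(skip=0, count=100, as_list=True)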
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_dcoument(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "inference", "instance" or "schema".
full_replace : bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
list of ids of the inserted documents
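Examples
--------
A minimal sketch (the document and database below are illustrative assumptions):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.insert_document({"@type": "Person", "name": "Jane"}, commit_msg="Adding Jane")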
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
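Examples
--------
A minimal sketch (the document below is an illustrative assumption; the document is expected to already exist unless `create` is True):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.replace_document({"@id": "Person/Jane", "@type": "Person", "name": "Janine"}, commit_msg="Rename Jane")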
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
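Examples
--------
A minimal sketch (the document below is an illustrative assumption):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.update_document({"@id": "Person/Jane", "@type": "Person", "name": "Jane"}, commit_msg="Upsert Jane")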
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
document: str or dict or list
Document(s) (as dictionaries or DocumentTemplate objects) or id(s) of the document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
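Examples
--------
A minimal sketch (the document id is an illustrative assumption):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.delete_document("Person/Jane", commit_msg="Removing Jane")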
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
Returns
-------
bool
True if the document exists
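Examples
--------
A minimal sketch (the document id is an illustrative assumption):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.has_doc("Person/Jane")
True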
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
Returns
-------
dict
Dictionary containing information
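Examples
--------
A minimal sketch (assuming a schema with a Person class):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> frame = client.get_class_frame("Person")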
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
If True, the data version of the query result(s) is obtained and the method returns the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
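Examples
--------
A minimal sketch (the branch name is an illustrative assumption):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.create_branch("my_new_branch")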
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
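Examples
--------
A minimal sketch (the branch name is an illustrative assumption):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.delete_branch("my_new_branch")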
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").push(remote="origin", remote_branch="main", author="admin", message="commit message")
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
branch : str, optional
the branch for the rebase
commit : str, optional
the commit id for the rebase
rebase_source : str, optional
the source branch for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when you need to go back after a soft reset).
soft: bool
Flag indicating if the reset is soft, that is referencing a previous commit instead of resetting to a previous commit in the backend and wiping out newer commits.
use_path : bool
Whether the commit given is an id or a path. Default is an id (use_path is False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id to be reset
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_dcoument(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json()
'{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id.startswith("terminusdb:///data"):
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
Connecting is not required when using the public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[str]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of str
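Examples
--------
A minimal sketch (the output shown is illustrative):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin")
>>> client.list_databases()
['some_db', 'another_db']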
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
set_db
|
Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
Database identifier to set in the config.
team : str
Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
URL of the server that this client is connected to.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
TerminusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Default to "main".
ref: str, None
Ref setting for the client. Default to None.
repo: str
Repo identifier of the database that this client is connected to. Default to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: Optional[str] = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, default to be "admin"
db: optional, str
Name of the database connected
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, default to be "root"
user: optional, str
Name of the user, default to be "admin"
use_token: bool
Use token to connect. If neither `jwt_token` nor `api_token` is provided (None), the ENV variable TERMINUSDB_ACCESS_TOKEN will be used as the API token
jwt_token: optional, str
The Bearer JWT token to connect. Default to be None.
api_token: optional, str
The API token to connect. Default to be None.
branch: optional, str
Branch to be connected, default to be "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, default to be "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
"""Undo connect and close the connection.
The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
Commit history - Commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the following format:
{"commit_id":
{"author": "commit_author",
"message": "commit_message",
"timestamp: <datetime object of the timestamp>" }
}
Parameters
----------
max_history: int, optional
maximum number of commits to return, counting backwards from your current commit. Default is set to 500. It needs to be non-negative; if the input is 0 it will still give the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.da
tetime(2020, 9, 3, 15, 29, 34)}, {'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'm
essage': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': 'rciy1rfu5foj67ch00ow6f6n
njjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15,
29, 33)}, {'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', '
timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gav
in@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)
}, {'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account
object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': '9egc4h0m36l5rbq1alr1fki6jbfu
kuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15,
29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
TerminusDB currently does not support open transactions, so this method is not applicable. To reset the commit head, use WOQLClient.reset
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
# MASKED: set_db function (lines 528-563)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/branch/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force: bool
Force the deletion of the database (defaults to False)
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
### TODO: make triples work again
raise InterfaceError("get_triples is temporarily not available in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples work again
raise InterfaceError(
"update_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples work again
raise InterfaceError(
"insert_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
Template for the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, default to be 0
count: int or None
The maximum number of returned results, if None (default) it will return all of the available results.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If True, the data version of the document(s) is obtained and the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
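Examples
--------
A minimal sketch (the database name "some_db" and the document type "Person" are illustrative, not part of any default setup):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> matches = client.query_document({"@type": "Person"}, as_list=True)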
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
IRI id of the document being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
If True, the data version of the document is obtained and the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
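Examples
--------
A minimal sketch (the document id "Person/Jane" and database name "some_db" are illustrative):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> doc = client.get_document("Person/Jane")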
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
Specific type of the documents being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, default to be 0
count: int or None
The maximum number of returned results, if None (default) it will return all of the available results.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If True, the data version of the document(s) is obtained and the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
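Examples
--------
A minimal sketch (the type "Person" and database name "some_db" are illustrative):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> people = client.get_documents_by_type("Person", as_list=True)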
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, default to be 0
count: int or None
The maximum number of returned results, if None (default) it will return all of the available results.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If True, the data version of the document(s) is obtained and the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
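Examples
--------
A minimal sketch (the database name "some_db" is illustrative):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> documents = client.get_all_documents(as_list=True)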
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_document(self, document, graph_type):
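# Normalise the given document(s) into plain dicts for the document API.
# For a list, also collect every "@capture" id and every "@ref" reference
# so that dangling references can be rejected before the request is sent.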
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "inference", "instance" or "schema".
full_replace : bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
list of ids of the inserted documents
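Examples
--------
A minimal sketch (the document content and database name "some_db" are illustrative; the "Person" class is assumed to exist in the schema):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.insert_document([{"@type": "Person", "name": "Jane"}], commit_msg="Adding Jane")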
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_document(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
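Examples
--------
A minimal sketch (the document content and database name "some_db" are illustrative):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.replace_document({"@id": "Person/Jane", "@type": "Person", "name": "Janine"}, commit_msg="Update Jane")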
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_document(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
document: str or list of str
Document(s) (as dictionary or DocumentTemplate objects) or id(s) of document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
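Examples
--------
A minimal sketch (the document id "Person/Jane" and database name "some_db" are illustrative):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.delete_document("Person/Jane", commit_msg="Removing Jane")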
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
Returns
-------
bool
True if the document exists, False otherwise
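Examples
--------
A minimal sketch (the document id "Person/Jane" and database name "some_db" are illustrative):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.has_doc("Person/Jane")
True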
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
Returns
-------
dict
Dictionary containing information
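Examples
--------
A minimal sketch (the class name "Person" and database name "some_db" are illustrative):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> frame = client.get_class_frame("Person")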
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
If True, the data version of the query result(s) is obtained and the method returns the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
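Examples
--------
A minimal sketch (the branch name "some_branch" and database name "some_db" are illustrative):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.create_branch("some_branch")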
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
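Examples
--------
A minimal sketch (the branch name "some_branch" and database name "some_db" are illustrative):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.delete_branch("some_branch")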
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
remote branch to pull from, default to be your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
remote branch to push to, default to be your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").push(remote="origin", remote_branch = "main", author = "admin", message = "commit message"})
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
branch : str, optional
the branch for the rebase
commit : str, optional
the commit id for the rebase
rebase_source : str, optional
the source branch for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when you need to go back after a soft reset).
soft: bool
Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to it in the backend and wiping newer commits.
use_path : bool
Whether the commit given is an id or a path. Defaults to False (the commit is treated as an id).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id to be reset
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_document(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json = '{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_document(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id[:len("terminusdb:///data")] == "terminusdb:///data":
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
Do not connect when using public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
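Examples
--------
A minimal sketch (the database name "example_db" is illustrative):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin")
>>> db_info = client.get_database("example_db")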
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[str]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of str
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
Database identifer to set in the config.
team : str
Team identifer to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
| 528
| 563
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
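# Walk the patch content recursively and collect the "@after" values of
# SwapValue operations, i.e. the new values this patch would produce.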
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
URL of the server that this client is connected to.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
TerminusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Default to "main".
ref: str, None
Ref setting for the client. Default to None.
repo: str
Repo identifier of the database that this client is connected to. Default to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: Optional[str] = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, default to be "admin"
db: optional, str
Name of the database connected
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, default to be "root"
user: optional, str
Name of the user, default to be "admin"
use_token: bool
Use a token to connect. If neither `jwt_token` nor `api_token` is provided (None), the environment variable TERMINUSDB_ACCESS_TOKEN will be used as the API token
jwt_token: optional, str
The Bearer JWT token to connect. Default to be None.
api_token: optional, str
The API token to connect. Default to be None.
branch: optional, str
Branch to be connected, default to be "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, default to be "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
"""Undo connect and close the connection.
The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
Commit history - Commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the following format:
{"commit_id":
{"author": "commit_author",
"message": "commit_message",
"timestamp": <datetime object of the timestamp>}
}
Parameters
----------
max_history: int, optional
maximum number of commits to return, counting backwards from your current commit. Default is set to 500. It needs to be non-negative; if the input is 0 it will still give the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 34)},
{'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'message': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'rciy1rfu5foj67ch00ow6f6nnjjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gavin@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9egc4h0m36l5rbq1alr1fki6jbfukuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
TerminusDB currently does not support open transactions, so this method is not applicable. To reset the commit head, use WOQLClient.reset
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
Database identifier to set in the config.
team : str
Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force: bool
If True, force the deletion of the database.
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
### TODO: make triples work again
raise InterfaceError("get_triples is temporarily not available in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples work again
raise InterfaceError(
"update_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples work again
raise InterfaceError(
"insert_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
Template for the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
as_list: bool
If True, the result is returned as a list rather than as an iterator.
get_data_version: bool
If True, the data version of the document(s) is also obtained and the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
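Examples
--------
A minimal usage sketch; the database name and the "Person" type are illustrative:
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="example_db")
>>> matches = client.query_document({"@type": "Person", "name": "Jane"}, as_list=True)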
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
IRI id of the document to retrieve
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
If True, the data version of the document is also obtained and the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
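Examples
--------
A minimal sketch, assuming a connected client and an existing document; the id "Person/Jane" is illustrative:
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="example_db")
>>> doc = client.get_document("Person/Jane")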
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
Specific type of the documents to retrieve
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, default to be 0
count: int or None
The maximum number of returned results; if None (default) it will return all of the available results.
as_list: bool
If True, the result is returned as a list rather than as an iterator.
get_data_version: bool
If True, the data version of the document(s) is also obtained and the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
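Examples
--------
A minimal sketch; "Person" is an illustrative type name:
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="example_db")
>>> people = client.get_documents_by_type("Person", count=10, as_list=True)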
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, default to be 0
count: int or None
The maximum number of returned results; if None (default) it will return all of the available results.
as_list: bool
If True, the result is returned as a list rather than as an iterator.
get_data_version: bool
If True, the data version of the document(s) is also obtained and the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
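Examples
--------
A minimal sketch; iterates over every document in the instance graph of an illustrative database:
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="example_db")
>>> for doc in client.get_all_documents():
...     print(doc["@id"])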
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_dcoument(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "inference", "instance" or "schema".
full_replace:: bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If an integer, data larger than this size in bytes (assuming UTF-8 encoding) will be compressed with gzip in the request (0 = always compress). If `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
list of ids of the inserted documents
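Examples
--------
A minimal sketch; the document and commit message are illustrative:
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="example_db")
>>> ids = client.insert_document({"@type": "Person", "name": "Jane"}, commit_msg="Adding Jane")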
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If an integer, data larger than this size in bytes (assuming UTF-8 encoding) will be compressed with gzip in the request (0 = always compress). If `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
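Examples
--------
A minimal sketch; replaces the illustrative document "Person/Jane":
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="example_db")
>>> client.replace_document({"@id": "Person/Jane", "@type": "Person", "name": "Janine"}, commit_msg="Rename Jane")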
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If an integer, data larger than this size in bytes (assuming UTF-8 encoding) will be compressed with gzip in the request (0 = always compress). If `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
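Examples
--------
A minimal sketch; the document (illustrative) is created if it does not yet exist:
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="example_db")
>>> client.update_document({"@id": "Person/Jane", "@type": "Person", "name": "Jane"}, commit_msg="Upsert Jane")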
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
document: str or dict or list
Document(s) (as dictionary or DocumentTemplate objects) or id(s) of document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
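Examples
--------
A minimal sketch; deletes the illustrative document "Person/Jane" by id:
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="example_db")
>>> client.delete_document("Person/Jane", commit_msg="Removing Jane")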
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
Returns
-------
bool
True if the document exists
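Examples
--------
A minimal sketch, assuming the illustrative document "Person/Jane" exists:
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="example_db")
>>> client.has_doc("Person/Jane")
True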
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
Returns
-------
dict
Dictionary containing information
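Examples
--------
A minimal sketch; "Person" is an illustrative class name:
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="example_db")
>>> frame = client.get_class_frame("Person")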
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
If True, the data version of the query result(s) is also obtained and the method returns the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient("http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
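Examples
--------
A minimal sketch; creates an illustrative "dev" branch from the current branch, then switches the client to it:
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="example_db")
>>> client.create_branch("dev")
>>> client.branch = "dev"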
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
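Examples
--------
A minimal sketch; deletes the illustrative branch "dev":
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="example_db")
>>> client.delete_branch("dev")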
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient("http://localhost:6363").push(remote="origin", remote_branch="main", author="admin", message="commit message")
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
branch : str, optional
the branch for the rebase
commit : str, optional
the commit id for the rebase
rebase_source : str, optional
the source branch for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when need to go back after a soft reset).
soft: bool
Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to a previous commit in the backend and wiping newer commits.
use_path : bool
Whether the commit given is an id or a path. Defaults to False (commit id).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id to be reset
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_dcoument(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json()
'{"name": {"@op": "SwapValue", "@before": "Jane", "@after": "Janine"}}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id.startswith("terminusdb:///data"):
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
Do not connect when using public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
>>> print(result)
{'@id': 'Person/Jane', '@type': 'Person', 'name': 'Janine'}"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
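Examples
--------
A minimal sketch; "example_db" is an illustrative database id:
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect()
>>> client.get_database("example_db")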
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[str]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of str
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
resource
|
Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
URL of the server that this client connected.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
TerminiusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Default to "main".
ref: str, None
Ref setting for the client. Default to None.
repo: str
Repo identifier of the database that this client is connected to. Default to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: Optional[str] = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, default to be "admin"
db: optional, str
Name of the database connected
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, default to be "root"
user: optional, str
Name of the user, default to be "admin"
use_token: bool
Use token to connect. If neither `jwt_token` nor `api_token` is provided (None), the ENV variable TERMINUSDB_ACCESS_TOKEN will be used as the API token
jwt_token: optional, str
The Bearer JWT token to connect. Default to be None.
api_token: optional, str
The API token to connect. Default to be None.
branch: optional, str
Branch to be connected, default to be "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, default to be "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
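# Example (sketch, assuming a local TerminusDB instance and a token in the
# environment): connecting with token authentication.
#
#   client = WOQLClient("http://localhost:6363")
#   client.connect(team="admin", db="example_db", use_token=True)
#   # With use_token=True and no jwt_token/api_token given, _auth() falls
#   # back to the TERMINUSDB_ACCESS_TOKEN environment variable.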
def close(self) -> None:
"""Undo connect and close the connection.
The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
Commit history - Commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the following format:
{"commit_id":
{"author": "commit_author",
"message": "commit_message",
"timestamp": <datetime object of the timestamp>}
}
Parameters
----------
max_history: int, optional
maximum number of commits to return, counting backwards from your current commit. Default is set to 500. It needs to be non-negative; if the input is 0 it will still return the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363"
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 34)},
{'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'message': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'rciy1rfu5foj67ch00ow6f6nnjjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gavin@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9egc4h0m36l5rbq1alr1fki6jbfukuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
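# Example (sketch, assuming each branch document carries a "name" field):
#
#   branches = client.get_all_branches()
#   names = [branch.get("name") for branch in branches]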
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
TerminusDB currently does not support open transactions, so this method is not applicable. To reset the commit head, use WOQLClient.reset
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
Database identifier to set in the config.
team : str
Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
# MASKED: resource function (lines 565-605)
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
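# Example (sketch; the prefix IRIs are hypothetical):
#
#   client.create_database(
#       "example_db",
#       team="admin",
#       label="Example",
#       description="Demo database",
#       prefixes={"@base": "terminusdb:///data/", "@schema": "terminusdb:///schema#"},
#   )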
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force: bool
Whether to force the deletion of the database
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
### TODO: make triples work again
raise InterfaceError("get_triples is temporarily not available in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples work again
raise InterfaceError(
"update_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples work again
raise InterfaceError(
"insert_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
Template for the document being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
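# Example (sketch; the "Person" type is an assumption about the schema):
#
#   people = client.query_document({"@type": "Person"}, as_list=True)
#   docs, version = client.query_document(
#       {"@type": "Person"}, as_list=True, get_data_version=True
#   )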
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
IRI id of the document being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
Specific type of the documents being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, default to be 0
count: int or None
The maximum number of returned results; if None (default) it will return all of the available results.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
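# Example (sketch; "Person" is a hypothetical document type): paging through
# documents 100 at a time with skip/count.
#
#   page_1 = client.get_documents_by_type("Person", skip=0, count=100, as_list=True)
#   page_2 = client.get_documents_by_type("Person", skip=100, count=100, as_list=True)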
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, default to be 0
count: int or None
The maximum number of returned results; if None (default) it will return all of the available results.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_dcoument(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "inference", "instance" or "schema".
full_replace : bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
list of ids of the inserted documents
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
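# Example (sketch; "Person" and its fields are assumptions about the schema):
# inserting plain dict documents; the server assigns and returns the ids.
#
#   ids = client.insert_document(
#       [{"@type": "Person", "name": "Jane"}, {"@type": "Person", "name": "Jim"}],
#       commit_msg="Adding Jane and Jim",
#   )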
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
document: str or list of str
Document(s) (as dictionary or DocumentTemplate objects) or id(s) of document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
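# Example (sketch; document ids are hypothetical): deleting by id or by a
# previously fetched document dict.
#
#   client.delete_document("Person/Jane", commit_msg="Remove Jane")
#   doc = client.get_document("Person/Jim")
#   client.delete_document(doc, commit_msg="Remove Jim")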
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
Returns
-------
bool
if the document exists
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
Returns
-------
dict
Dictionary containing information
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
If the data version of the query result(s) should be obtained. If True, the method return the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient("http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict or str
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
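# Example (sketch; the variable and property names are assumptions about the
# data): a read-only query built with WOQLQuery.
#
#   q = WOQLQuery().triple("v:doc", "name", "v:name")
#   result = client.query(q)
#   for binding in result.get("bindings", []):
#       print(binding["name"]["@value"])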
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
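# Example (sketch): branch off the current head, then point the client at it.
#
#   client.create_branch("dev")
#   client.branch = "dev"  # subsequent operations now target the new branch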
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
remote branch to pull from, default to be your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
remote branch to push to, default to be your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient("http://localhost:6363").push(remote="origin", remote_branch = "main", author = "admin", message = "commit message")
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
branch : str, optional
the branch for the rebase
commit : str, optional
the commit id for the rebase
rebase_source : str, optional
the source branch or commit for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when you need to go back after a soft reset).
soft: bool
Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to it in the backend and wiping newer commits.
use_path : bool
Whether the commit given is an id or a path. Default is id (use_path is False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id to be reset
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_dcoument(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
There is no need to connect when using the public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json()
'{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id[:len("terminusdb:///data")] == "terminusdb:///data":
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
There is no need to connect when using the public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
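# Authentication precedence (sketch): basic (user, key) auth when use_token is
# False; otherwise JWT, then the explicit API token, then the
# TERMINUSDB_ACCESS_TOKEN environment variable.
#
#   client.connect(use_token=True, api_token="<token>")  # hypothetical token value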
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[Dict]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
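# URL shapes produced by _branch_base and the helpers below (sketch, with
# placeholder values):
#   _db_url()        -> {api}/db/{team}/{db}
#   _documents_url() -> {api}/document/{team}/{db}/{repo}/branch/{branch}
#   _query_url()     -> {api}/woql/{team}/{db}/{repo}/branch/{branch}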
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/{branch_value}",
}
return urls[ttype]
| 565
| 605
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
URL of the server that this client is connected to.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
TerminusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Default to "main".
ref: str, None
Ref setting for the client. Default to None.
repo: str
Repo identifier of the database that this client is connected to. Default to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
            return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
        remote_auth: Optional[str] = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
        team: str
            Name of the team, defaults to "admin"
        db: optional, str
            Name of the database to connect to
        remote_auth: optional, str
            Remote Auth setting
        key: optional, str
            API key for connecting, defaults to "root"
        user: optional, str
            Name of the user, defaults to "admin"
        use_token: bool
            Use token to connect. If both `jwt_token` and `api_token` are not provided (None), the ENV variable TERMINUSDB_ACCESS_TOKEN will be used as the API token to connect.
        jwt_token: optional, str
            The Bearer JWT token to connect. Defaults to None.
        api_token: optional, str
            The API token to connect. Defaults to None.
        branch: optional, str
            Branch to connect to, defaults to "main"
        ref: optional, str
            Ref setting
        repo: optional, str
            Local or remote repo, defaults to "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
"""Undo connect and close the connection.
        The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
        Commit history - Commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the following format:
{"commit_id":
{"author": "commit_author",
"message": "commit_message",
"timestamp: <datetime object of the timestamp>" }
}
Parameters
----------
max_history: int, optional
            maximum number of commits to return, counting backwards from your current commit. Defaults to 500. It needs to be non-negative; if the input is 0 it will still return the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
        >>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
        [{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 34)},
         {'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'message': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': 'rciy1rfu5foj67ch00ow6f6nnjjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gavin@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': '9egc4h0m36l5rbq1alr1fki6jbfukuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
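    # Illustrative sketch: collecting branch names. Assumes a connected
    # client; Branch documents in the _commits graph carry a "name" field,
    # as used by the commit queries above.
    #
    #   branches = client.get_all_branches()
    #   names = [branch["name"] for branch in branches]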
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
            TerminusDB currently does not support open transactions, so this method is not applicable. To reset the commit head, use WOQLClient.reset instead.
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
            Database identifier to set in the config.
        team : str
            Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
            ResourceType.BRANCH: f"{base}{self.repo}/branch/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
        force: bool
            If True, force the deletion of the database.
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
        ### TODO: make triples work again
        raise InterfaceError("get_triples is temporarily not available in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
        ### TODO: make triples work again
        raise InterfaceError(
            "update_triples is temporarily not available in this version"
        )
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
        ### TODO: make triples work again
        raise InterfaceError(
            "insert_triples is temporarily not available in this version"
        )
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
            Template for the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
        as_list: bool
            If True, the result is returned as a list rather than an iterator.
        get_data_version: bool
            If True, the data version of the document(s) is returned together with the result as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
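        Examples
        --------
        A minimal sketch; assumes a connected client and a ``Person`` class in the schema:
        >>> client.query_document({"@type": "Person", "name": "Jane"}, as_list=True)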
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
            IRI id of the document being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
        get_data_version: bool
            If True, the data version of the document is returned together with the result as a tuple.
        kwargs:
            Additional boolean flags for retrieving. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
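        Examples
        --------
        A minimal sketch; the document id is a placeholder:
        >>> client.get_document("Person/Jane")
        >>> doc, version = client.get_document("Person/Jane", get_data_version=True)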
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
            Specific type of the documents being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
        skip: int
            The starting position of the returned results, defaults to 0
        count: int or None
            The maximum number of returned results; if None (default) all available results are returned.
        as_list: bool
            If True, the result is returned as a list rather than an iterator.
        get_data_version: bool
            If True, the data version of the document(s) is returned together with the result as a tuple.
        kwargs:
            Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
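        Examples
        --------
        A minimal sketch; assumes a ``Person`` class in the schema:
        >>> client.get_documents_by_type("Person", count=10, as_list=True)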
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
        skip: int
            The starting position of the returned results, defaults to 0
        count: int or None
            The maximum number of returned results; if None (default) all available results are returned.
        as_list: bool
            If True, the result is returned as a list rather than an iterator.
        get_data_version: bool
            If True, the data version of the document(s) is returned together with the result as a tuple.
        kwargs:
            Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
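        Examples
        --------
        A minimal sketch of paging through documents with skip/count:
        >>> client.get_all_documents(skip=0, count=100, as_list=True)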
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
    def _convert_document(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "inference", "instance" or "schema".
        full_replace : bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
            If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (encoding is assumed to be UTF-8; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
            list of ids of the inserted documents
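        Examples
        --------
        A minimal sketch; the document shape is a placeholder:
        >>> client.insert_document({"@type": "Person", "name": "Jane"}, commit_msg="Adding Jane")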
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
        new_doc = self._convert_document(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
            If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (encoding is assumed to be UTF-8; 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
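        Examples
        --------
        A minimal sketch; ``create=True`` inserts the document if it does not exist:
        >>> client.replace_document({"@id": "Person/Jane", "@type": "Person", "name": "Janine"}, commit_msg="Updating Jane", create=True)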
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
        new_doc = self._convert_document(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
            If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (encoding is assumed to be UTF-8; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
document: str or list of str
            Document(s) (as dictionary or DocumentTemplate objects) or id(s) of document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
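        Examples
        --------
        A minimal sketch; the ids are placeholders:
        >>> client.delete_document("Person/Jane", commit_msg="Removing Jane")
        >>> client.delete_document(["Person/Jane", "Person/Jim"], commit_msg="Removing Jane and Jim")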
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
        Returns
        -------
        bool
            True if the document exists
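        Examples
        --------
        A minimal sketch; the document id is a placeholder:
        >>> client.has_doc("Person/Jane")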
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
        Returns
        -------
        dict
            Dictionary containing the class frame information
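        Examples
        --------
        A minimal sketch; assumes a ``Person`` class exists in the schema:
        >>> client.get_class_frame("Person")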
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
        commit_msg : str
A message that will be written to the commit log to describe the change
        get_data_version: bool
            If True, the data version of the query result is returned together with the result as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
        >>> WOQLClient("http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
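        Examples
        --------
        A minimal sketch: branch off the current head, then switch to the new branch:
        >>> client.create_branch("dev")
        >>> client.branch = "dev"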
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
            remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
            option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
            author = self._author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
            remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
            option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
        >>> WOQLClient("http://localhost:6363").push(remote="origin", remote_branch="main", author="admin", message="commit message")
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
        branch : str, optional
            the branch for the rebase
        commit : str, optional
            the commit id for the rebase
        rebase_source : str, optional
            the source branch for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when need to go back after a soft reset).
        soft: bool
            Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to it in the backend and wiping newer commits.
        use_path : bool
            Whether the commit given is an id or a path. Defaults to id (use_path is False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id to be reset
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
    def _convert_diff_document(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
        >>> result.to_json()
        '{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
                request_dict[key] = self._convert_diff_document(item)
if document_id is not None:
if "before_data_version" in request_dict:
                if document_id.startswith("terminusdb:///data"):
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
        Do not connect when using the public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
        >>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
        >>> print(result)
        '{ "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
        description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
    def list_databases(self) -> List[str]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
        list of str
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
create_database
|
Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
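# A minimal sketch (not part of the client) of how the two auth helpers above
# plug into `requests`; the URL and tokens below are placeholders:
def _example_auth_usage():
    # requests calls the auth object with the prepared request, letting it
    # attach the appropriate header before the request is sent.
    requests.get("https://example.com/api/info", auth=JWTAuth("my-jwt-token"))
    requests.get("https://example.com/api/info", auth=APITokenAuth("my-api-token"))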
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
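# A minimal sketch (not part of the client) exercising the Patch helper above;
# the JSON payload is a made-up example in the SwapValue shape that the
# `update` and `before` properties expect:
def _example_patch_usage():
    p = Patch(json='{"name": {"@op": "SwapValue", "@before": "Jane", "@after": "Janine"}}')
    print(p.update)     # {'name': 'Janine'} - values after the patch
    print(p.before)     # {'name': 'Jane'}   - values before the patch
    return p.to_json()  # serialise the patch back to a JSON string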
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
URL of the server that this client is connected to.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
TerminusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Defaults to "main".
ref: str, None
Ref setting for the client. Defaults to None.
repo: str
Repo identifier of the database that this client is connected to. Defaults to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: str = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, default to be "admin"
db: optional, str
Name of the database connected
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, default to be "root"
user: optional, str
Name of the user, default to be "admin"
use_token: bool
Use token to connect. If both `jwt_token` and `api_token` are not provided (None), the environment variable TERMINUSDB_ACCESS_TOKEN will be used as the API token
jwt_token: optional, str
The Bearer JWT token to connect. Default to be None.
api_token: optional, str
The API token to connect. Default to be None.
branch: optional, str
Branch to be connected, default to be "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, default to be "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
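# A minimal sketch of connecting with an API token taken from the
# environment; the URL, team and database names are placeholders:
#
#     client = WOQLClient("https://cloud.example.com/myteam")
#     client.connect(team="myteam", db="mydb", use_token=True)
#
# With use_token=True and no explicit jwt_token/api_token, authentication
# falls back to the TERMINUSDB_ACCESS_TOKEN environment variable (see
# _auth below).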
def close(self) -> None:
"""Undo connect and close the connection.
The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
Commit history - commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the following format:
{"commit_id":
{"author": "commit_author",
"message": "commit_message",
"timestamp": <datetime object of the timestamp>}
}
Parameters
----------
max_history: int, optional
maximum number of commits to return, counting backwards from your current commit. Default is 500. It needs to be non-negative; if the input is 0 it will still return the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363"
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 34)},
{'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'message': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'rciy1rfu5foj67ch00ow6f6nnjjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gavin@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9egc4h0m36l5rbq1alr1fki6jbfukuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
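# A minimal sketch of listing branch names; it assumes each Branch document
# returned above carries a "name" field:
#
#     for branch in client.get_all_branches():
#         print(branch["name"])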
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
TerminusDB currently does not support open transactions, so this method is not applicable. To reset the commit head, use WOQLClient.reset instead.
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
Database identifier to set in the config.
team : str
Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
# MASKED: create_database function (lines 617-689)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force: bool
whether to force the deletion of the database
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
### TODO: make triples works again
raise InterfaceError("get_triples is temporary not avaliable in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples works again
raise InterfaceError(
"update_triples is temporary not avaliable in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples works again
raise InterfaceError(
"insert_triples is temporary not avaliable in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
Template for the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
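# A minimal sketch of matching documents against a template; the type and
# field names are placeholders for whatever your schema defines:
#
#     template = {"@type": "Person", "name": "Jane"}
#     for doc in client.query_document(template, count=10):
#         print(doc["@id"])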
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
IRI id of the document to retrieve
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
Specific type of the documents to retrieve
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, default to be 0
count: int or None
The maximum number of returned results; if None (default) it will return all of the available results.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, default to be 0
count: int or None
The maximum number of returned results; if None (default) it will return all of the available results.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_dcoument(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "inference", "instance" or "schema".
full_replace : bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
list of ids of the inserted documents
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
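# A minimal sketch of inserting plain-dict documents; the type and field
# names are made up and assume a matching schema already exists:
#
#     docs = [{"@type": "Person", "name": "Jane"},
#             {"@type": "Person", "name": "Jim"}]
#     ids = client.insert_document(docs, commit_msg="Add two people")
#
# Payloads larger than `compress` bytes are gzip-compressed before posting.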
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
document: str or list of str
Document(s) (as dictionary or DocumentTemplate objects) or id(s) of document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
returns
-------
Bool
if the document exists
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
returns
-------
dict
Dictionary containing information
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
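# A minimal sketch; "Person" is a placeholder class name:
#
#     frame = client.get_class_frame("Person")
#     print(frame)  # dict describing the properties available on Person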
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
If the data version of the query result(s) should be obtained. If True, the method returns the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient("http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
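# A minimal sketch of running a WOQL query through this method; the class
# name is a placeholder:
#
#     q = WOQLQuery().triple("v:doc", "rdf:type", "@schema:Person")
#     result = client.query(q)
#     for binding in result["bindings"]:
#         print(binding["doc"])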
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient("http://localhost:6363").push(remote="origin", remote_branch="main", author="admin", message="commit message")
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
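# A minimal sketch of syncing with a remote, using the defaults described
# above ("origin" and the current branch):
#
#     client.pull()                              # fetch and merge remote changes
#     client.push(message="Push local changes")  # send local commits upstream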
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
branch : str, optional
the branch for the rebase
commit : str, optional
the commit id for the rebase
rebase_source : str, optional
the source branch for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when you need to go back after a soft reset).
soft: bool
Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to a previous commit in the backend and wiping newer commits.
use_path : bool
Whether the commit given is an id or a path. Defaults to id (use_path is False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id to be reset
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_dcoument(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json = '{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id[:len("terminusdb:///data")] == "terminusdb:///data":
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
Do not connect when using public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
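# A minimal sketch of a diff/patch round trip using the two methods above;
# the documents are made-up examples:
#
#     before = {"@id": "Person/Jane", "@type": "Person", "name": "Jane"}
#     after = {"@id": "Person/Jane", "@type": "Person", "name": "Janine"}
#     patch_obj = client.diff(before, after)
#     assert client.patch(before, patch_obj) == after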
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[str]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of str
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
return base
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
| 617
| 689
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
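# Illustrative Patch usage sketch (values are hypothetical): build a Patch
# from a diff JSON string, then read the swapped values back through the
# `update` and `before` properties above.
#   patch = Patch(json='{"name": {"@op": "SwapValue", "@before": "Jane", "@after": "Janine"}}')
#   patch.update  # {'name': 'Janine'}
#   patch.before  # {'name': 'Jane'}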
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
URL of the server that this client is connected to.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
TerminusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Defaults to "main".
ref: str, None
Ref setting for the client. Defaults to None.
repo: str
Repo identifier of the database that this client is connected to. Defaults to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: Optional[str] = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, defaults to "admin"
db: optional, str
Name of the database to connect to
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, defaults to "root"
user: optional, str
Name of the user, defaults to "admin"
use_token: bool
Use a token to connect. If neither `jwt_token` nor `api_token` is provided (None), the environment variable TERMINUSDB_ACCESS_TOKEN will be used as the API token
jwt_token: optional, str
The Bearer JWT token to connect with. Defaults to None.
api_token: optional, str
The API token to connect with. Defaults to None.
branch: optional, str
Branch to connect to, defaults to "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, defaults to "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
"""Undo connect and close the connection.
The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
Commit history - Commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the following format:
{"commit_id":
{"author": "commit_author",
"message": "commit_message",
"timestamp": <datetime object of the timestamp>}
}
Parameters
----------
max_history: int, optional
maximum number of commits to return, counting backwards from your current commit. Defaults to 500. It needs to be non-negative; if the input is 0 it will still return the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363"
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 34)},
{'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'message': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'rciy1rfu5foj67ch00ow6f6nnjjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gavin@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9egc4h0m36l5rbq1alr1fki6jbfukuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
Since TerminusDB currently does not support open transactions. This method is not applicable to it's usage. To reset commit head, use WOQLClient.reset
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
Database identifier to set in the config.
team : str
Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
return self.db
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/branch/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client is not connected to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force: bool
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client is not connected to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client is not connected to a database
Returns
-------
str
"""
### TODO: make triples work again
raise InterfaceError("get_triples is temporarily not available in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client is not connected to a database
"""
### TODO: make triples work again
raise InterfaceError(
"update_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client is not connected to a database
"""
### TODO: make triples work again
raise InterfaceError(
"insert_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
Template for the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, defaults to 0
count: int or None
The maximum number of returned results, if None (default) all of the available results will be returned
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client is not connected to a database
Returns
-------
Iterable
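Examples
--------
An illustrative sketch; assumes a running server and an existing database "example_db" whose schema defines a Person class (names are hypothetical):
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="example_db")
>>> documents = client.query_document({"@type": "Person", "name": "Jane"}, as_list=True)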
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
Iri id for the docuemnt that is retriving
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method return the result and the version as a tuple.
kwargs:
Additional boolean flags for retriving. Currently avaliable: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
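Examples
--------
An illustrative sketch; assumes a connected client and an existing document (the id is hypothetical):
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="example_db")
>>> doc = client.get_document("Person/Jane")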
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
Specific type for the docuemnts that is retriving
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting posiion of the returning results, default to be 0
count: int or None
The maximum number of returned result, if None (default) it will return all of the avalible result.
as_list: bool
If the result returned as list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method return the result and the version as a tuple.
kwargs:
Additional boolean flags for retriving. Currently avaliable: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
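Examples
--------
An illustrative sketch; assumes a connected client whose schema defines a Person class (names are hypothetical):
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="example_db")
>>> people = client.get_documents_by_type("Person", count=10, as_list=True)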
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting posiion of the returning results, default to be 0
count: int or None
The maximum number of returned result, if None (default) it will return all of the avalible result.
as_list: bool
If the result returned as list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method return the result and the version as a tuple.
kwargs:
Additional boolean flags for retriving. Currently avaliable: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
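Examples
--------
An illustrative sketch; assumes a connected client (the database name is hypothetical):
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="example_db")
>>> for doc in client.get_all_documents(count=100):
...     print(doc)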
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_document(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "instance" or "schema".
full_replace : bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client is not connected to a database
Returns
-------
list
list of ids of the inserted documents
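Examples
--------
An illustrative sketch; assumes a connected client whose schema already defines a Person class (values are hypothetical):
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="example_db")
>>> client.insert_document({"@type": "Person", "name": "Jane"}, commit_msg="Adding Jane")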
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_document(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client is not connected to a database
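Examples
--------
An illustrative sketch; assumes the document already exists (values are hypothetical):
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="example_db")
>>> client.replace_document({"@id": "Person/Jane", "@type": "Person", "name": "Janine"}, commit_msg="Renaming Jane")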
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_document(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client is not connected to a database
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
document: str or list of str
Document(s) (as dictionary or DocumentTemplate objects) or id(s) of document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client is not connected to a database
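Examples
--------
An illustrative sketch; assumes the document exists (the id is hypothetical):
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="example_db")
>>> client.delete_document("Person/Jane", commit_msg="Deleting Jane")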
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
Returns
-------
bool
True if the document exists
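Examples
--------
An illustrative sketch (the document id is hypothetical):
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="example_db")
>>> client.has_doc("Person/Jane")
True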
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
Returns
-------
dict
Dictionary containing information
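Examples
--------
An illustrative sketch; assumes the schema defines a Person class:
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="example_db")
>>> frame = client.get_class_frame("Person")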
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
commit_mg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
If the data version of the query result(s) should be obtained. If True, the method return the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client is not connected to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client is not connected to a database
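Examples
--------
An illustrative sketch (the branch name is hypothetical):
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="example_db")
>>> client.create_branch("dev")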
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client is not connected to a database
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client is not connected to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client is not connected to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").push(remote="origin", remote_branch = "main", author = "admin", message = "commit message"})
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
branch : str, optional
the branch for the rebase
commit : str, optional
the commit id for the rebase
rebase_source : str, optional
the source branch or commit for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client is not connected to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client is not connected to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when you need to go back after a soft reset).
soft: bool
Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to a previous commit in the backend and wiping newer commits.
use_path : bool
Whether the commit given is an id or a path. Defaults to using the id (use_path is False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client is not connected to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client is not connected to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id to be reset
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_document(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json = '{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_document(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id.startswith("terminusdb:///data"):
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
Do not connect when using the public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "ValueSwap", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client is not connected to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client is not connected to a server
Returns
-------
dict or None if not found
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client is not connected to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[Dict]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client is not connected to a server
Returns
-------
list of str
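Examples
--------
An illustrative sketch (the returned names depend on the databases the user can access):
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect()
>>> client.list_databases()
['example_db']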
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
query_document
|
Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
Template for the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
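Examples
--------
A minimal usage sketch; assumes a connected database and a hypothetical "Person" class in the schema:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(db="example_db")
>>> matches = client.query_document({"@type": "Person", "name": "Jane"}, as_list=True)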
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
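# A minimal usage sketch for Patch; the diff content below is illustrative and
# follows the SwapValue operation format handled by the properties above:
# >>> p = Patch(json='{"name": {"@op": "SwapValue", "@before": "Jane", "@after": "Janine"}}')
# >>> p.update
# {'name': 'Janine'}
# >>> p.before
# {'name': 'Jane'}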
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
URL of the server that this client is connected to.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
TerminusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Defaults to "main".
ref: str, None
Ref setting for the client. Defaults to None.
repo: str
Repo identifier of the database that this client is connected to. Defaults to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: str = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, defaults to "admin"
db: optional, str
Name of the database to connect to
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, defaults to "root"
user: optional, str
Name of the user, defaults to "admin"
use_token: bool
Use token to connect. If neither `jwt_token` nor `api_token` is provided, the environment variable TERMINUSDB_ACCESS_TOKEN will be used as the API token.
jwt_token: optional, str
The Bearer JWT token to connect. Defaults to None.
api_token: optional, str
The API token to connect. Defaults to None.
branch: optional, str
Branch to be connected, defaults to "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, defaults to "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
"""Undo connect and close the connection.
The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
The commit history (commit id, author, message, and timestamp), in the current branch from the current commit and ordered backwards in time, will be returned as a list of dictionaries in the following format:
{"commit": "<commit_id>",
"author": "<commit_author>",
"message": "<commit_message>",
"timestamp": <datetime object of the timestamp>}
Parameters
----------
max_history: int, optional
Maximum number of commits to return, counting backwards from your current commit. Defaults to 500. It needs to be non-negative; if the input is 0 it will still return the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363"
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 34)},
 {'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'message': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
 {'commit': 'rciy1rfu5foj67ch00ow6f6nnjjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
 {'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
 {'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gavin@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
 {'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
 {'commit': '9egc4h0m36l5rbq1alr1fki6jbfukuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
Since TerminusDB currently does not support open transactions. This method is not applicable to it's usage. To reset commit head, use WOQLClient.reset
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
Database identifier to set in the config.
team : str
Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force: bool
If True, force the deletion of the database.
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
### TODO: make triples work again
raise InterfaceError("get_triples is temporarily not available in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples work again
raise InterfaceError(
"update_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples work again
raise InterfaceError(
"insert_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
# MASKED: query_document function (lines 852-917)
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
IRI id of the document being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieval. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
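Examples
--------
A minimal usage sketch; the database name and document id are illustrative:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(db="example_db")
>>> client.get_document("Person/Jane")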
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
Specific type of the documents being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, defaults to 0
count: int or None
The maximum number of returned results; if None (default) all available results will be returned.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieval. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
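Examples
--------
A minimal usage sketch; assumes a hypothetical "Person" class in the schema:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(db="example_db")
>>> people = client.get_documents_by_type("Person", count=10, as_list=True)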
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, defaults to 0
count: int or None
The maximum number of returned results; if None (default) all available results will be returned.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieval. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
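Examples
--------
A minimal usage sketch; the database name is illustrative:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(db="example_db")
>>> docs = client.get_all_documents(count=100, as_list=True)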
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_dcoument(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "instance" or "schema".
full_replace : bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes, assuming UTF-8 encoding) will be compressed with gzip in the request (0 = always compress). If it is `"never"`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
list of ids of the inserted documents
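Examples
--------
A minimal usage sketch; the document content is illustrative:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(db="example_db")
>>> client.insert_document({"@type": "Person", "name": "Jane"}, commit_msg="Adding Jane")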
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes, assuming UTF-8 encoding) will be compressed with gzip in the request (0 = always compress). If it is `"never"`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
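Examples
--------
A minimal usage sketch; the document content is illustrative:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(db="example_db")
>>> client.replace_document({"@id": "Person/Jane", "@type": "Person", "name": "Janine"}, commit_msg="Update Jane")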
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes, assuming UTF-8 encoding) will be compressed with gzip in the request (0 = always compress). If it is `"never"`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
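Examples
--------
A minimal usage sketch; the document content is illustrative:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(db="example_db")
>>> client.update_document({"@id": "Person/Jane", "@type": "Person", "name": "Jane"}, commit_msg="Upsert Jane")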
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
document: str, dict, or list
Document(s) (as dictionaries or DocumentTemplate objects) or id(s) of the document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
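Examples
--------
A minimal usage sketch; the document id is illustrative:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(db="example_db")
>>> client.delete_document("Person/Jane", commit_msg="Remove Jane")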
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
Returns
-------
bool
True if the document exists
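Examples
--------
A minimal usage sketch; the document id is illustrative:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(db="example_db")
>>> client.has_doc("Person/Jane")
True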
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
Returns
-------
dict
Dictionary containing the class frame information
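Examples
--------
A minimal usage sketch; the class name is illustrative:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(db="example_db")
>>> client.get_class_frame("Person")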
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
If the data version of the query result(s) should be obtained. If True, the method return the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient("http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
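Examples
--------
A minimal usage sketch; the branch name is illustrative:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(db="example_db")
>>> client.create_branch("my_new_branch")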
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
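Examples
--------
A minimal usage sketch; the branch name is illustrative:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(db="example_db")
>>> client.delete_branch("my_new_branch")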
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient("http://localhost:6363").push(remote="origin", remote_branch="main", author="admin", message="commit message")
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
branch : str, optional
the branch for the rebase
commit : str, optional
the commit id for the rebase
rebase_source : str, optional
the source branch or commit for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when need to go back after a soft reset).
soft: bool
Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to a previous commit in the backend and wiping newer commits.
use_path : bool
Whether the commit given is an id or a path. Defaults to treating it as an id (use_path is False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id to be reset
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_dcoument(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json()
'{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id[:len("terminusdb:///data")] == "terminusdb:///data":
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
Do not connect when using the public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[str]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of str
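Examples
--------
A minimal usage sketch, assuming a local server with default credentials:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect()
>>> client.list_databases()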
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
Template for the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, defaults to 0
count: int or None
The maximum number of returned results; if None (default) all of the available results will be returned.
as_list: bool
If True, the result is returned as a list rather than as an iterator.
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
| 852
| 917
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
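"""Wrapper for a TerminusDB diff patch document.
Examples
--------
Illustrative sketch; the JSON shape follows the diff format used by `WOQLClient.diff`:
>>> patch = Patch(json='{"name": {"@op": "SwapValue", "@before": "Jane", "@after": "Janine"}}')
>>> patch.update
{'name': 'Janine'}
>>> patch.before
{'name': 'Jane'}
"""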
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
URL of the server that this client is connected to.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
TerminusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Defaults to "main".
ref: str, None
Ref setting for the client. Defaults to None.
repo: str
Repo identifier of the database that this client is connected to. Defaults to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: str = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, defaults to "admin"
db: str, optional
Name of the database to connect to
remote_auth: str, optional
Remote Auth setting
key: str, optional
API key for connecting, defaults to "root"
user: str, optional
Name of the user, defaults to "admin"
use_token: bool
Use token to connect. If both `jwt_token` and `api_token` are not provided (None), the environment variable TERMINUSDB_ACCESS_TOKEN will be used as the API token.
jwt_token: str, optional
The Bearer JWT token to connect. Defaults to None.
api_token: str, optional
The API token to connect. Defaults to None.
branch: str, optional
Branch to be connected, defaults to "main"
ref: str, optional
Ref setting
repo: str, optional
Local or remote repo, defaults to "local"
\**kwargs
Extra configuration options.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
"""Undo connect and close the connection.
The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
Commit history - Commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the following format:
{"commit_id":
{"author": "commit_author",
"message": "commit_message",
"timestamp": <datetime object of the timestamp>}
}
Parameters
----------
max_history: int, optional
Maximum number of commits that will be returned, counting backwards from the current commit. Default is 500. It needs to be non-negative; if the input is 0 it will still return the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 34)},
{'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'message': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'rciy1rfu5foj67ch00ow6f6nnjjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gavin@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9egc4h0m36l5rbq1alr1fki6jbfukuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
Since TerminusDB currently does not support open transactions, this method is not applicable. To reset the commit head, use WOQLClient.reset instead.
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
Database identifier to set in the config.
team : str
Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/branch/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force: bool
If True, the deletion will be forced.
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
### TODO: make triples work again
raise InterfaceError("get_triples is temporarily not available in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples work again
raise InterfaceError(
"update_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples work again
raise InterfaceError(
"insert_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
Template for the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, defaults to 0
count: int or None
The maximum number of returned results; if None (default) all of the available results will be returned.
as_list: bool
If True, the result is returned as a list rather than as an iterator.
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
IRI id of the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
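Examples
--------
Illustrative sketch; assumes a connected client and a document with id "Person/Jane":
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.get_document("Person/Jane")
{'@id': 'Person/Jane', '@type': 'Person', 'name': 'Jane'}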
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
Specific type of the documents that are being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, defaults to 0
count: int or None
The maximum number of returned results; if None (default) all of the available results will be returned.
as_list: bool
If True, the result is returned as a list rather than as an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
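Examples
--------
Illustrative sketch; assumes a connected client and a schema with a "Person" class:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> people = client.get_documents_by_type("Person", count=10, as_list=True)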
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, defaults to 0
count: int or None
The maximum number of returned results; if None (default) all of the available results will be returned.
as_list: bool
If True, the result is returned as a list rather than as an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
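Examples
--------
Illustrative sketch; assumes a connected client:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> docs = client.get_all_documents(count=5, as_list=True)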
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_dcoument(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "inference", "instance" or "schema".
full_replace:: bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
list of ids of the inserted documents
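Examples
--------
Illustrative sketch; assumes a connected client and a schema with a "Person" class:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.insert_document({"@type": "Person", "name": "Jane"}, commit_msg="Adding Jane")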
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
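Examples
--------
Illustrative sketch; assumes a connected client and an existing document "Person/Jane":
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.replace_document({"@id": "Person/Jane", "@type": "Person", "name": "Janine"}, commit_msg="Update Jane")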
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
document: str or dict or list
Document(s) (as dictionary or DocumentTemplate objects) or id(s) of document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
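Examples
--------
Illustrative sketch; assumes a connected client and an existing document "Person/Jane":
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.delete_document("Person/Jane", commit_msg="Remove Jane")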
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
Returns
-------
bool
True if the document exists, False otherwise.
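Examples
--------
Illustrative sketch; assumes a connected client:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.has_doc("Person/Jane")
True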
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
Returns
-------
dict
Dictionary containing the class frame information.
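Examples
--------
Illustrative sketch; assumes a connected client and a schema with a "Person" class:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.get_class_frame("Person")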
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
If the data version of the query result(s) should be obtained. If True, the method return the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> WOQLClient("http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
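Examples
--------
Illustrative sketch; assumes a connected client:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.create_branch("my_new_branch")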
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
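Examples
--------
Illustrative sketch; assumes a connected client and an existing branch "my_new_branch":
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.delete_branch("my_new_branch")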
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> WOQLClient("http://localhost:6363").push(remote="origin", remote_branch="main", author="admin", message="commit message")
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
branch : str, optional
the branch for the rebase
commit : str, optional
the commit id for the rebase
rebase_source : str, optional
the source branch for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None and commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when need to go back after a soft reset).
soft: bool
Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to it in the backend and wiping newer commits.
use_path : bool
Whether the commit given is an id or a path. Defaults to id (use_path is False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id to be reset
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_dcoument(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json()
'{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id.startswith("terminusdb:///data"):
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
Do not connect when using the public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[str]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of str
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
get_document
|
Retrieves the document of the iri_id
Parameters
----------
iri_id : str
IRI id of the document being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
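# A minimal illustrative sketch of Patch usage (hypothetical values; the JSON
# shape matches what WOQLClient.diff produces):
#   patch = Patch(json='{"name": {"@op": "SwapValue", "@before": "Jane", "@after": "Janine"}}')
#   patch.before  # -> {'name': 'Jane'}
#   patch.update  # -> {'name': 'Janine'}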
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
URL of the server that this client is connected to.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
TerminusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Defaults to "main".
ref: str, None
Ref setting for the client. Defaults to None.
repo: str
Repo identifier of the database that this client is connected to. Defaults to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: str = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, default to be "admin"
db: optional, str
Name of the database connected
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, default to be "root"
user: optional, str
Name of the user, default to be "admin"
use_token: bool
Use token to connect. If neither `jwt_token` nor `api_token` is provided (None), the environment variable TERMINUSDB_ACCESS_TOKEN will be used as the API token
jwt_token: optional, str
The Bearer JWT token to connect. Default to be None.
api_token: optional, str
The API token to connect. Default to be None.
branch: optional, str
Branch to be connected, default to be "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, default to be "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
"""Undo connect and close the connection.
The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
Commit history - Commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the following format:
{"commit_id":
{"author": "commit_author",
"message": "commit_message",
"timestamp: <datetime object of the timestamp>" }
}
Parameters
----------
max_history: int, optional
maximum number of commits to return, counting backwards from your current commit. Default is set to 500. It needs to be non-negative; if the input is 0 it will still give the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363"
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 34)},
{'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'message': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'rciy1rfu5foj67ch00ow6f6nnjjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gavin@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9egc4h0m36l5rbq1alr1fki6jbfukuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
Since TerminusDB currently does not support open transactions, this method is not applicable. To reset the commit head, use WOQLClient.reset
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
Database identifier to set in the config.
team : str
Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force: bool
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
### TODO: make triples work again
raise InterfaceError("get_triples is temporarily not available in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples work again
raise InterfaceError(
"update_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples work again
raise InterfaceError(
"insert_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
Template for the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
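Examples
--------
>>> # An illustrative sketch; the "Person" type and its fields are assumptions:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> matches = client.query_document({"@type": "Person", "name": "Jane"}, as_list=True)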
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
# MASKED: get_document function (lines 919-968)
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
Specific type of the documents to retrieve
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, default to be 0
count: int or None
The maximum number of results to return; if None (default) all of the available results will be returned.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
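Examples
--------
>>> # An illustrative sketch; the "Person" type is an assumption:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> people = client.get_documents_by_type("Person", count=10, as_list=True)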
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, default to be 0
count: int or None
The maximum number of results to return; if None (default) all of the available results will be returned.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
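Examples
--------
>>> # An illustrative sketch (assumes an existing database):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> docs, version = client.get_all_documents(as_list=True, get_data_version=True)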
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_dcoument(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "inference", "instance" or "schema".
full_replace:: bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
list of ids of the inserted documents
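Examples
--------
>>> # An illustrative sketch; the document shape is an assumption:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> ids = client.insert_document({"@type": "Person", "name": "Jane"}, commit_msg="Adding Jane")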
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
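Examples
--------
>>> # An illustrative sketch; assumes "Person/Jane" already exists:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.replace_document({"@id": "Person/Jane", "@type": "Person", "name": "Janine"}, commit_msg="Renaming Jane")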
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
document: str or list of str
Document(s) (as dictionary or DocumentTemplate objects) or id(s) of document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
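Examples
--------
>>> # An illustrative sketch; the document id is an assumption:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.delete_document("Person/Jane", commit_msg="Removing Jane")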
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
Returns
-------
bool
True if the document exists, False otherwise
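Examples
--------
>>> # An illustrative sketch; the document id is an assumption:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.has_doc("Person/Jane")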
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
Returns
-------
dict
Dictionary containing information
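Examples
--------
>>> # An illustrative sketch; the "Person" class is an assumption:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> frame = client.get_class_frame("Person")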
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
If the data version of the query result(s) should be obtained. If True, the method returns the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient("http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
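Examples
--------
>>> # An illustrative sketch (assumes a connected database):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.create_branch("my_new_branch")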
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
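Examples
--------
>>> # An illustrative sketch (assumes the branch exists):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.delete_branch("my_new_branch")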
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
remote branch to pull from, default to be your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
remote branch to push to, default to be your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient("http://localhost:6363").push(remote="origin", remote_branch = "main", author = "admin", message = "commit message")
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
branch : str, optional
the branch for the rebase
commit : str, optional
the commit id for the rebase
rebase_source : str, optional
the source branch for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when need to go back after a soft reset).
soft: bool
Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to a previous commit in the backend and wiping newer commits.
use_path : bool
Whether the commit given is an id or a path. Defaults to id (use_path is False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id of the newly created squash commit (can be passed to reset)
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_dcoument(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json()
'{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id[:len("terminusdb:///data")] == "terminusdb:///data":
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
Do not connect when using public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
>>> print(result)
{'@id': 'Person/Jane', '@type': 'Person', 'name': 'Janine'}"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
Description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[Dict]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
return base
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
Iri id for the docuemnt that is retriving
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method return the result and the version as a tuple.
kwargs:
Additional boolean flags for retriving. Currently avaliable: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
| 919
| 968
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
    def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
    def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
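# A minimal usage sketch for Patch (the field name "name" is illustrative):
# build a patch from its JSON form, then read the original values back via
# `before` and the post-patch values via `update`, as defined above.
#
# >>> p = Patch(json='{"name": {"@op": "SwapValue", "@before": "Jane", "@after": "Janine"}}')
# >>> p.before
# {'name': 'Jane'}
# >>> p.update
# {'name': 'Janine'}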
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
    server_url: str
        URL of the server that this client is connected to.
    api: str
        API endpoint for this client.
    team: str
        Team that this client is using. "admin" for local dbs.
    db: str
        Database that this client is connected to.
    user: str
        TerminusDB user that this client is using. "admin" for local dbs.
    branch: str
        Branch of the database that this client is connected to. Defaults to "main".
    ref: str, None
        Ref setting for the client. Defaults to None.
    repo: str
        Repo identifier of the database that this client is connected to. Defaults to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
            return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: str = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
        team: str
            Name of the team, defaults to "admin"
        db: optional, str
            Name of the database to connect to
        remote_auth: optional, str
            Remote Auth setting
        key: optional, str
            API key for connecting, defaults to "root"
        user: optional, str
            Name of the user, defaults to "admin"
        use_token: bool
            Use a token to connect. If neither `jwt_token` nor `api_token` is provided (None), the environment variable TERMINUSDB_ACCESS_TOKEN will be used as the API token
        jwt_token: optional, str
            The Bearer JWT token to connect. Defaults to None.
        api_token: optional, str
            The API token to connect. Defaults to None.
        branch: optional, str
            Branch to be connected to, defaults to "main"
        ref: optional, str
            Ref setting
        repo: optional, str
            Local or remote repo, defaults to "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
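    # A minimal sketch of a typical connect flow (URL, credentials and database
    # name are placeholders): connect() stores the settings above, verifies the
    # server, and checks that the database exists before setting the author.
    #
    # >>> client = WOQLClient("https://127.0.0.1:6363")
    # >>> client.connect(user="admin", key="root", team="admin", db="example_db")
    # >>> client.close()  # drop the connection when done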
def close(self) -> None:
"""Undo connect and close the connection.
        The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
        Commit history - Commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the following format:
        {"commit_id":
            {"author": "commit_author",
             "message": "commit_message",
             "timestamp": <datetime object of the timestamp>}
        }
Parameters
----------
max_history: int, optional
            maximum number of commits to return, counting backwards from your current commit. Defaults to 500. It needs to be non-negative; if the input is 0 it will still return the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
        >>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
        [{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 34)},
         {'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'message': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': 'rciy1rfu5foj67ch00ow6f6nnjjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gavin@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': '9egc4h0m36l5rbq1alr1fki6jbfukuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
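    # Example sketch (database contents are hypothetical): walking the commit
    # log of the current branch, newest first, using the dictionaries built above.
    #
    # >>> for commit in client.get_commit_history(max_history=5):
    # ...     print(commit["commit"], commit["author"], commit["message"])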
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
            TerminusDB currently does not support open transactions, so this method is not applicable. To reset the commit head, use WOQLClient.reset"""
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
        dbid : str
            Database identifier to set in the config.
        team : str
            Team identifier to set in the config. If not passed in, the current one will be used.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
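    # A minimal sketch (ids and IRIs are placeholders): creating a database with
    # custom prefixes. Note that create_database also switches the client over to
    # the newly created database, as the assignments above show.
    #
    # >>> client.create_database(
    # ...     "my_db",
    # ...     label="My DB",
    # ...     description="A test database",
    # ...     prefixes={"@base": "terminusdb:///data/", "@schema": "terminusdb:///schema#"},
    # ... )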
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
        force: bool
            If True, force the deletion of the database.
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
        ### TODO: make triples work again
        raise InterfaceError("get_triples is temporarily not available in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
        ### TODO: make triples work again
        raise InterfaceError(
            "update_triples is temporarily not available in this version"
        )
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
        ### TODO: make triples work again
        raise InterfaceError(
            "insert_triples is temporarily not available in this version"
        )
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
        document_template : dict
            Template for the document that is being retrieved
        graph_type : str, optional
            Graph type, either "instance" or "schema".
        skip: int
            The starting position of the returned results, defaults to 0
        count: int or None
            The maximum number of returned results; if None (default), all available results are returned.
        as_list: bool
            If True, the result is returned as a list rather than an iterator.
        get_data_version: bool
            Whether to obtain the data version of the document(s). If True, the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
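    # Example sketch (the "Person" class and its fields are hypothetical):
    # retrieving every document that matches a template; the template keys are
    # matched against document fields on the server side.
    #
    # >>> for doc in client.query_document({"@type": "Person", "name": "Jane"}):
    # ...     print(doc["@id"])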
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
Iri id for the docuemnt that is retriving
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method return the result and the version as a tuple.
kwargs:
Additional boolean flags for retriving. Currently avaliable: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
        doc_type : str
            Specific type of the documents being retrieved
        graph_type : str, optional
            Graph type, either "instance" or "schema".
        skip: int
            The starting position of the returned results, defaults to 0
        count: int or None
            The maximum number of returned results; if None (default), all available results are returned.
        as_list: bool
            If True, the result is returned as a list rather than an iterator.
        get_data_version: bool
            Whether to obtain the version of the document(s). If True, the method returns the result and the version as a tuple.
        kwargs:
            Additional boolean flags for retrieval. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting posiion of the returning results, default to be 0
count: int or None
The maximum number of returned result, if None (default) it will return all of the avalible result.
as_list: bool
If the result returned as list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method return the result and the version as a tuple.
kwargs:
Additional boolean flags for retriving. Currently avaliable: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
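    # Example sketch: paging through documents with skip/count and capturing the
    # data version, which can later be passed as `last_data_version` to the
    # update methods for an optimistic-concurrency check.
    #
    # >>> page, version = client.get_all_documents(
    # ...     skip=0, count=100, as_list=True, get_data_version=True
    # ... )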
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
    def _convert_document(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
        graph_type : str
            Graph type, either "instance" or "schema".
        full_replace : bool
            If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
        commit_msg : str
            Commit message.
        last_data_version : str
            Last version before the update, used to check if the document has been changed unknowingly
        compress : str or int
            If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (encoding is assumed to be UTF-8; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
            list of ids of the inserted documents
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
        new_doc = self._convert_document(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
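    # Example sketch (document contents are placeholders): inserting documents
    # and collecting the returned ids; payloads larger than `compress` bytes are
    # gzip-encoded before being sent, as implemented above.
    #
    # >>> ids = client.insert_document(
    # ...     [{"@type": "Person", "name": "Jane"}, {"@type": "Person", "name": "Jim"}],
    # ...     commit_msg="Adding Jane and Jim",
    # ... )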
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
        compress : str or int
            If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (encoding is assumed to be UTF-8; 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
        new_doc = self._convert_document(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
        compress : str or int
            If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (encoding is assumed to be UTF-8; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
        document: str or dict or list
            Document(s) (as dictionary or DocumentTemplate objects) or id(s) of document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
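    # Example sketch: documents can be deleted by id, or by passing the document
    # (or DocumentTemplate object) itself; the loop above extracts the ids.
    #
    # >>> client.delete_document("Person/Jane", commit_msg="Removing Jane")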
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
        Returns
        -------
        bool
            True if the document exists
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
        Returns
-------
dict
Dictionary containing information
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
commit_mg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
If the data version of the query result(s) should be obtained. If True, the method return the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
        >>> WOQLClient("http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
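    # Example sketch (variable names are illustrative): running a read query
    # returns the bindings, while a query that inserts or deletes returns the
    # confirmation string above.
    #
    # >>> woql = WOQLQuery().triple("v:s", "v:p", "v:o")
    # >>> result = client.query(woql)
    # >>> bindings = result["bindings"]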
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
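    # Example sketch (branch name is a placeholder): branching from the current
    # HEAD and then switching the client over via the `branch` property.
    #
    # >>> client.create_branch("dev")
    # >>> client.branch = "dev"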
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
        remote_branch: str, optional
            remote branch to pull from, defaults to your current branch
        message: str, optional
            optional commit message
        author: str, optional
            option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
            author = self._author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
        remote_branch: str, optional
            remote branch to push to, defaults to your current branch
        message: str, optional
            optional commit message
        author: str, optional
            option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
        >>> WOQLClient("http://localhost:6363").push(remote="origin", remote_branch="main", author="admin", message="commit message")
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
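    # Example sketch: pushing the current branch to the matching branch on the
    # "origin" remote; message and author fall back to the defaults built above.
    #
    # >>> client.push(remote="origin", remote_branch="main")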
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
        branch : str, optional
            the branch for the rebase
        commit : str, optional
            the commit id for the rebase
rebase_source : str, optional
the source branch for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
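    # Example sketch (branch name is a placeholder): rebasing the current branch
    # onto another local branch. Exactly one of `branch`, `commit` or
    # `rebase_source` must be given, as the checks above enforce.
    #
    # >>> client.rebase(branch="dev", message="Rebase dev into the current branch")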
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when need to go back after a soft reset).
        soft: bool
            Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to it in the backend and wiping newer commits.
        use_path : bool
            Whether the commit given is an id or a path. Defaults to id (use_path=False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
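    # Example sketch: a soft reset only moves the client's internal reference
    # (self._ref) to an older commit and can be undone by calling reset() with no
    # arguments; a hard reset rewrites the branch HEAD on the server.
    #
    # >>> client.reset("234980523ffaf93", soft=True)  # look at an older commit
    # >>> client.reset()                              # back to the newest commit
    # >>> client.reset("234980523ffaf93")             # hard reset, wipes newer commits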
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
        str
            Commit id of the new squash commit, which can be passed to reset
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
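    # Example sketch: squashing the current branch into a single commit and then
    # resetting to it, which is equivalent to passing reset=True above.
    #
    # >>> commit_id = client.squash("Squash everything into one commit")
    # >>> client.reset(commit_id)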
    def _convert_diff_document(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json = '{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_document(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id.startswith("terminusdb:///data"):
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
Do not connect when using public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "ValueSwap", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
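# Example (illustrative sketch, not from the library docs): with
# use_token=True and no explicit jwt_token/api_token, _auth() falls back to
# the TERMINUSDB_ACCESS_TOKEN environment variable:
#
#   >>> os.environ["TERMINUSDB_ACCESS_TOKEN"] = "<token>"  # hypothetical token
#   >>> client = WOQLClient("https://127.0.0.1:6363/")
#   >>> client.connect(use_token=True)  # _auth() now returns APITokenAuth(<token>)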
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[str]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of str
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
get_documents_by_type
|
Retrieves the documents by type
Parameters
----------
doc_type : str
Specific type of the documents to retrieve
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, defaults to 0
count: int or None
The maximum number of returned results; if None (default) all available results are returned.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
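# Example (sketch): both classes plug directly into requests, e.g.
#   >>> requests.get(url, auth=JWTAuth("<jwt>"))  # hypothetical url and token
# sends the request with an "Authorization: Bearer <jwt>" header.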
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
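# Example (minimal sketch of the accessors above, assuming a SwapValue patch):
#   >>> p = Patch(json='{"name": {"@op": "SwapValue", "@before": "Jane", "@after": "Janine"}}')
#   >>> p.update
#   {'name': 'Janine'}
#   >>> p.before
#   {'name': 'Jane'}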
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
URL of the server that this client is connected to.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
TerminusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Defaults to "main".
ref: str, None
Ref setting for the client. Defaults to None.
repo: str
Repo identifier of the database that this client is connected to. Defaults to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: str = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, default to be "admin"
db: optional, str
Name of the database connected
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, default to be "root"
user: optional, str
Name of the user, default to be "admin"
use_token: bool
Use token to connect. If neither `jwt_token` nor `api_token` is provided (None), the ENV variable TERMINUSDB_ACCESS_TOKEN will be used as the API token
jwt_token: optional, str
The Bearer JWT token to connect. Default to be None.
api_token: optional, str
The API token to connect. Default to be None.
branch: optional, str
Branch to be connected, default to be "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, default to be "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
"""Undo connect and close the connection.
The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
The commit history (commit id, author, message and timestamp of each commit), in the current branch starting from the current commit and ordered backwards in time, is returned as a list of dictionaries of the following format:
{"commit": "commit_id",
"author": "commit_author",
"message": "commit_message",
"timestamp": <datetime object of the timestamp>}
Parameters
----------
max_history: int, optional
maximum number of commits to return, counting backwards from your current commit. Default is set to 500. It needs to be non-negative; if the input is 0 it will still return the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363"
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 34)},
{'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'message': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'rciy1rfu5foj67ch00ow6f6nnjjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gavin@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9egc4h0m36l5rbq1alr1fki6jbfukuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
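# Example (sketch, assuming a connected client): each branch record is a
# document from the _commits graph, so branch names can be listed with
#   >>> [branch["name"] for branch in client.get_all_branches()]
#   ['main']  # e.g., for a fresh database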
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
TerminusDB currently does not support open transactions, so this method is not applicable. To reset the commit head, use WOQLClient.reset
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
Database identifier to set in the config.
team : str
Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/branch/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
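# Example (sketch): for a database created with the default prefixes, the
# result typically includes the "@base" and "@schema" IRIs, e.g.
#   {'@base': 'terminusdb:///data/', '@schema': 'terminusdb:///schema#', ...}
# (exact keys and values depend on the database configuration)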
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force: bool
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
### TODO: make triples works again
raise InterfaceError("get_triples is temporary not avaliable in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples works again
raise InterfaceError(
"update_triples is temporary not avaliable in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples works again
raise InterfaceError(
"insert_triples is temporary not avaliable in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
Template for the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
IRI id of the document to retrieve
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
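# Example (sketch, assuming a connected client and an existing document;
# the document id is hypothetical):
#   >>> client.get_document("Person/Jane")
#   {'@id': 'Person/Jane', '@type': 'Person', 'name': 'Jane'}
#   >>> doc, version = client.get_document("Person/Jane", get_data_version=True)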
# MASKED: get_documents_by_type function (lines 970-1039)
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, defaults to 0
count: int or None
The maximum number of returned results; if None (default) all available results are returned.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
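# Example (sketch): skip/count can be combined to page through documents,
# e.g. the second page of 10 documents as a concrete list:
#   >>> page = client.get_all_documents(skip=10, count=10, as_list=True)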
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_document(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "inference", "instance" or "schema".
full_replace:: bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
list of ids of the inserted documents
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_document(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
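# Example (sketch, assuming a connected client whose schema has a Person class):
#   >>> ids = client.insert_document({"@type": "Person", "name": "Jane"},
#   ...                              commit_msg="Adding Jane")
#   >>> ids  # list of ids of the inserted documents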
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_document(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
document: str, dict, or list
Document(s) (as dictionary or DocumentTemplate objects) or id(s) of document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
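# Example (sketch): documents can be deleted by id, dict or document object;
# the id below is hypothetical:
#   >>> client.delete_document("Person/Jane", commit_msg="Removing Jane")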
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
Returns
-------
bool
True if the document exists
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
Returns
-------
dict
Dictionary containing the class frame information
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
If the data version of the query result(s) should be obtained. If True, the method return the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient("http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
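# Example (sketch, assuming a connected client): branch off the current
# head, then remove the branch again:
#   >>> client.create_branch("dev")
#   >>> client.delete_branch("dev")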
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient("http://localhost:6363").push(remote="origin", remote_branch="main", author="admin", message="commit message")
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
branch : str, optional
the branch for the rebase
commit : str, optional
the commit id for the rebase
rebase_source : str, optional
the source branch for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when need to go back after a soft reset).
soft: bool
Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to it in the backend and wiping newer commits.
use_path : bool
Whether the commit given is an id or a path. Defaults to False, i.e. the commit is treated as an id.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id to be reset
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_document(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json()
'{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id.startswith("terminusdb:///data"):
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
Do not connect when using public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[str]:
"""
Returns a list of the names of all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of str
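Examples
--------
A minimal sketch (the database names shown are illustrative only):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root")
>>> client.list_databases()
['database1', 'database2']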
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
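# A minimal usage sketch for the Patch class above (the values are illustrative only):
# >>> p = Patch(json='{"name": {"@op": "SwapValue", "@before": "Jane", "@after": "Janine"}}')
# >>> p.update
# {'name': 'Janine'}
# >>> p.before
# {'name': 'Jane'}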
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
URL of the server that this client is connected to.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
TerminusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Defaults to "main".
ref: str, None
Ref setting for the client. Defaults to None.
repo: str
Repo identifier of the database that this client is connected to. Defaults to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: str = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, defaults to "admin"
db: optional, str
Name of the database to connect to
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, defaults to "root"
user: optional, str
Name of the user, defaults to "admin"
use_token: bool
Use token to connect. If neither `jwt_token` nor `api_token` is provided, the ENV variable TERMINUSDB_ACCESS_TOKEN will be used as the API token
jwt_token: optional, str
The Bearer JWT token to connect. Defaults to None.
api_token: optional, str
The API token to connect. Defaults to None.
branch: optional, str
Branch to connect to, defaults to "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, defaults to "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
"""Undo connect and close the connection.
The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
Commit history - commit id, author, commit message and commit timestamp, in the current branch from the current commit, ordered backwards in time - is returned as a list of dicts in the following format:
{"commit": "commit_id",
"author": "commit_author",
"message": "commit_message",
"timestamp": <datetime object of the timestamp>}
Parameters
----------
max_history: int, optional
maximum number of commits to return, counting backwards from the current commit. Defaults to 500. It needs to be non-negative; if the input is 0 it will still return the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363"
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 34)},
{'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'message': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'rciy1rfu5foj67ch00ow6f6nnjjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gavin@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9egc4h0m36l5rbq1alr1fki6jbfukuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
Since TerminusDB currently does not support open transactions. This method is not applicable to it's usage. To reset commit head, use WOQLClient.reset
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
Database identifier to set in the config.
team : str
Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/branch/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force: bool
Whether to force the deletion of the database.
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
### TODO: make triples work again
raise InterfaceError("get_triples is temporarily not available in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples work again
raise InterfaceError(
"update_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples work again
raise InterfaceError(
"insert_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
Template for the documents being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, defaults to 0
count: int or None
The maximum number of returned results; if None (default) all available results are returned.
as_list: bool
If True, return the result as a list rather than an iterator.
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
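Examples
--------
A minimal sketch, assuming documents of a "Person" class exist in the connected database:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> list(client.query_document({"@type": "Person", "name": "Jane"}))
[{'@id': 'Person/Jane', '@type': 'Person', 'name': 'Jane'}]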
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
IRI id of the document being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieval. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
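Examples
--------
A minimal sketch, assuming a document "Person/Jane" exists in the connected database:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.get_document("Person/Jane")
{'@id': 'Person/Jane', '@type': 'Person', 'name': 'Jane'}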
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
Specific type of the documents being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, defaults to 0
count: int or None
The maximum number of returned results; if None (default) all available results are returned.
as_list: bool
If True, return the result as a list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieval. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
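Examples
--------
A minimal sketch, assuming a "Person" class with two instances:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.get_documents_by_type("Person", as_list=True)
[{'@id': 'Person/Jane', '@type': 'Person', 'name': 'Jane'}, {'@id': 'Person/Jim', '@type': 'Person', 'name': 'Jim'}]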
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, defaults to 0
count: int or None
The maximum number of returned results; if None (default) all available results are returned.
as_list: bool
If True, return the result as a list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieval. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
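Examples
--------
A minimal sketch, assuming the connected database holds a single "Person" document:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.get_all_documents(as_list=True)
[{'@id': 'Person/Jane', '@type': 'Person', 'name': 'Jane'}]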
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_dcoument(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "inference", "instance" or "schema".
full_replace:: bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
list of ids of the inserted documents
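Examples
--------
A minimal sketch, assuming a "Person" class is already in the schema (the returned id is illustrative):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.insert_document([{"@type": "Person", "name": "Jane"}], commit_msg="Adding Jane")
['terminusdb:///data/Person/Jane']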
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
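Examples
--------
A minimal sketch, assuming the document "Person/Jane" already exists:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.replace_document({"@id": "Person/Jane", "@type": "Person", "name": "Janine"}, commit_msg="Renaming Jane")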
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
document: str or list of str
Document(s) (as dictionary or DocumentTemplate objects) or id(s) of document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
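Examples
--------
A minimal sketch, assuming a document "Person/Jane" exists:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.delete_document("Person/Jane", commit_msg="Removing Jane")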
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
Returns
-------
bool
True if the document exists
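Examples
--------
A minimal sketch, assuming a document "Person/Jane" exists:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.has_doc("Person/Jane")
True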
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
Returns
-------
dict
Dictionary containing information
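Examples
--------
A minimal sketch, assuming a "Person" class with a single "name" property (the returned frame shape is illustrative):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.get_class_frame("Person")
{'@type': 'Class', 'name': 'xsd:string'}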
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
If the data version of the query result(s) should be obtained. If True, the method returns the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient("http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
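Examples
--------
A minimal sketch, branching off the current branch HEAD:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.create_branch("dev")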
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
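Examples
--------
A minimal sketch, assuming a branch "dev" exists:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.delete_branch("dev")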
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient("http://localhost:6363").push(remote="origin", remote_branch="main", author="admin", message="commit message")
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
branch : str, optional
the branch for the rebase
commit : str, optional
the commit id for the rebase
rebase_source : str, optional
the source branch or commit for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when you need to go back after a soft reset).
soft: bool
Flag indicating whether the reset is soft, i.e. only referencing a previous commit in the client instead of resetting to it in the backend and wiping newer commits.
use_path : bool
Whether the commit given is a path rather than an id. Defaults to False (commit id).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id of the newly created squash commit (can be passed to reset)
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
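A variant that also moves HEAD to the squashed commit (a sketch using the `reset` flag):
>>> commit_id = client.squash('This is a squash commit message!', reset=True)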
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_dcoument(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json()
'{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
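# Note (a sketch): when `before` is a data version or commit id, a single
# document can be targeted, e.g.
#   client.diff(before_version, after_version, document_id="terminusdb:///data/Person/Jane")
# where `before_version`, `after_version` and the id are illustrative; the id
# must start with `terminusdb:///data`.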
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id[:len("terminusdb:///data")] == "terminusdb:///data":
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
Do not connect when using public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[str]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of str
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
get_all_documents
|
Retrieves all available documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, default to be 0
count: int or None
The maximum number of returned results; if None (default) all of the available results will be returned.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
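# A minimal sketch of working with Patch (values are illustrative):
#
#   patch = Patch(json='{"name": {"@op": "SwapValue", "@before": "Jane", "@after": "Janine"}}')
#   patch.before      # {'name': 'Jane'}
#   patch.update      # {'name': 'Janine'}
#   patch.to_json()   # serialize the patch content back to a JSON string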
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
URL of the server that this client is connected to.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
TerminusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Defaults to "main".
ref: str, None
Ref setting for the client. Defaults to None.
repo: str
Repo identifier of the database that this client is connected to. Defaults to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: str = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, default to be "admin"
db: optional, str
Name of the database connected
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, default to be "root"
user: optional, str
Name of the user, default to be "admin"
use_token: bool
Use token to connect. If both `jwt_token` and `api_token` are not provided (None), then the environment variable TERMINUSDB_ACCESS_TOKEN will be used as the API token to connect
jwt_token: optional, str
The Bearer JWT token to connect. Default to be None.
api_token: optional, str
The API token to connect. Default to be None.
branch: optional, str
Branch to be connected, default to be "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, default to be "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
"""Undo connect and close the connection.
The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
The commit history - commit id, author, message and timestamp of each commit - in the current branch from the current commit, ordered backwards in time, is returned as a list of dictionaries of the following format:
[{"commit": "<commit_id>",
"author": "<commit_author>",
"message": "<commit_message>",
"timestamp": <datetime object of the timestamp>}]
Parameters
----------
max_history: int, optional
Maximum number of commits returned, counting backwards from the current commit. Defaults to 500. It needs to be non-negative; if the input is 0 it will still return the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363"
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 34)},
 {'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'message': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
 {'commit': 'rciy1rfu5foj67ch00ow6f6nnjjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
 {'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
 {'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gavin@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
 {'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
 {'commit': '9egc4h0m36l5rbq1alr1fki6jbfukuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}]
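A bounded variant (a sketch):
>>> client.get_commit_history(max_history=10)  # at most the 10 most recent commits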
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
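# Usage sketch (assuming a connected client; "name" is assumed to be a field
# of the returned Branch documents):
#   branches = client.get_all_branches()
#   branch_names = [branch.get("name") for branch in branches]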
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
TerminusDB currently does not support open transactions, so this method is not applicable. To reset the commit head, use WOQLClient.reset
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
Database identifier to set in the config.
team : str
Team identifier to set in the config. If not passed in, the current team will be used.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force: bool
whether to force the deletion of the database
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
### TODO: make triples works again
raise InterfaceError("get_triples is temporary not avaliable in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples works again
raise InterfaceError(
"update_triples is temporary not avaliable in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples works again
raise InterfaceError(
"insert_triples is temporary not avaliable in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
Template for the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
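Examples
--------
A minimal template query (a sketch; the class and field are illustrative):
>>> client.query_document({"@type": "Person", "name": "Jane"}, as_list=True)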
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
IRI id of the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
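Examples
--------
A sketch (the document id is illustrative):
>>> doc = client.get_document("Person/Jane")
>>> doc, version = client.get_document("Person/Jane", get_data_version=True)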
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
Specific type of the documents that are being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, default to be 0
count: int or None
The maximum number of returned results; if None (default) all of the available results will be returned.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
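Examples
--------
A paged sketch (the class name is illustrative):
>>> docs = client.get_documents_by_type("Person", skip=0, count=10, as_list=True)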
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
# MASKED: get_all_documents function (lines 1041-1107)
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_dcoument(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "inference", "instance" or "schema".
full_replace:: bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
list of ids of the inserted documents
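Examples
--------
A minimal sketch (assuming a connected client and a schema with a matching class):
>>> client.insert_document({"@type": "Person", "name": "Jane"}, commit_msg="Adding Jane")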
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
document: str or list of str
Document(s) (as dictionary or DocumentTemplate objects) or id(s) of document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
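Examples
--------
A sketch deleting by id (the id is illustrative):
>>> client.delete_document("Person/Jane", commit_msg="Removing Jane")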
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
returns
-------
bool
if the document exists
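Examples
--------
>>> client.has_doc("Person/Jane")  # a sketch; the id is illustrative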
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
returns
-------
dict
Dictionary containing information
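Examples
--------
>>> client.get_class_frame("Person")  # a sketch; the class name is illustrative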
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
If the data version of the query result(s) should be obtained. If True, the method returns the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
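Examples
--------
A sketch (branch names are illustrative):
>>> client.create_branch("dev")  # branch off the current branch
>>> client.create_branch("scratch", empty=True)  # no starting commit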
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").push(remote="origin", remote_branch = "main", author = "admin", message = "commit message"})
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
branch : str, optional
the branch for the rebase
commit : str, optional
the commit id for the rebase
rebase_source : str, optional
the source branch (or commit path) for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when you need to go back after a soft reset).
soft: bool
Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to it in the backend and wiping newer commits.
use_path : bool
Whether the given commit is an id or a path. Defaults to id (use_path is False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
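A soft-reset sketch (the commit id is illustrative):
>>> client.reset('234980523ffaf93', soft=True)  # only moves the client's reference
>>> client.reset()  # return to the newest commit after a soft reset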
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id to be reset
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
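The returned commit id can then be passed to reset to move the branch HEAD to the squashed commit (or pass reset=True to do both in one call):
>>> commit_id = client.squash('This is a squash commit message!')
>>> client.reset(commit_id)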
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_dcoument(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json()
'{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'
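A sketch of diffing by data version instead of document objects; the version strings below are hypothetical placeholders:
>>> result = client.diff("branch:main", "branch:other", document_id="terminusdb:///data/Person/Jane")
"""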
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id.startswith("terminusdb:///data"):
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
Do not connect when using public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[str]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of str
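Examples
--------
A sketch listing database names (the output shown is illustrative):
>>> client.list_databases()
['database1', 'database2']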
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
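# _branch_base resolves the most specific context available: the _meta repo
# first, then the _commits graph, then an explicit ref (commit), and finally
# the current branch. With hypothetical settings team=admin, db=mydb,
# repo=local, branch=main, _branch_base("woql") yields:
#   {self.api}/woql/admin/mydb/local/branch/main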
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting posiion of the returning results, default to be 0
count: int or None
The maximum number of returned result, if None (default) it will return all of the avalible result.
as_list: bool
If the result returned as list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method return the result and the version as a tuple.
kwargs:
Additional boolean flags for retriving. Currently avaliable: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
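Examples
--------
A hypothetical paging sketch over a connected database:
>>> for doc in client.get_all_documents(skip=0, count=100):
...     print(doc)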
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
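# A minimal usage sketch (the patch JSON shape mirrors the diff/patch
# examples on WOQLClient below):
#   p = Patch(json='{"name": {"@op": "SwapValue", "@before": "Jane", "@after": "Janine"}}')
#   p.update   # -> {'name': 'Janine'}
#   p.before   # -> {'name': 'Jane'}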
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
URL of the server that this client connected.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
TerminiusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Default to "main".
ref: str, None
Ref setting for the client. Default to None.
repo: str
Repo identifier of the database that this client is connected to. Default to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: str = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, default to be "admin"
db: optional, str
Name of the database connected
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, default to be "root"
user: optional, str
Name of the user, default to be "admin"
use_token: bool
Use token to connect. If both `jwt_token` and `api_token` are not provided (None), the environment variable TERMINUSDB_ACCESS_TOKEN will be used as the API token
jwt_token: optional, str
The Bearer JWT token to connect. Default to be None.
api_token: optional, str
The API token to connect. Default to be None.
branch: optional, str
Branch to be connected, default to be "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, default to be "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
"""Undo connect and close the connection.
The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
Commit history - Commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the following format:
{"commit_id":
{"author": "commit_author",
"message": "commit_message",
"timestamp": <datetime object of the timestamp>}
}
Parameters
----------
max_history: int, optional
maximum number of commits that will be returned, counting backwards from your current commit. Default is set to 500. It needs to be non-negative; if the input is 0 it will still give the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 34)},
{'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'message': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'rciy1rfu5foj67ch00ow6f6nnjjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gavin@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9egc4h0m36l5rbq1alr1fki6jbfukuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
TerminusDB currently does not support open transactions, so this method is not applicable. To reset the commit head, use WOQLClient.reset
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
Database identifier to set in the config.
team : str
Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force: bool
Whether to force the deletion of the database
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
### TODO: make triples works again
raise InterfaceError("get_triples is temporary not avaliable in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples works again
raise InterfaceError(
"update_triples is temporary not avaliable in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples works again
raise InterfaceError(
"insert_triples is temporary not avaliable in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
Template for the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
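Examples
--------
A hypothetical template query (assumes a Person class with an age property):
>>> list(client.query_document({"@type": "Person", "age": 42}, count=10))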
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
Iri id of the document being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
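Examples
--------
A sketch fetching a document by id (the id and the output are illustrative):
>>> client.get_document("Person/Jane")
{'@id': 'Person/Jane', '@type': 'Person', 'name': 'Jane'}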
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
Specific type of the documents being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, defaults to 0
count: int or None
The maximum number of returned results; if None (default) all available results are returned.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
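Examples
--------
A sketch retrieving the first 100 documents of a hypothetical Person type:
>>> people = client.get_documents_by_type("Person", count=100, as_list=True)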
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting posiion of the returning results, default to be 0
count: int or None
The maximum number of returned result, if None (default) it will return all of the avalible result.
as_list: bool
If the result returned as list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method return the result and the version as a tuple.
kwargs:
Additional boolean flags for retriving. Currently avaliable: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
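Examples
--------
A hypothetical paging sketch over a connected database:
>>> for doc in client.get_all_documents(skip=0, count=100):
...     print(doc)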
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_dcoument(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "inference", "instance" or "schema".
full_replace:: bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
list of ids of the inserted documents
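Examples
--------
A hypothetical insert of a single document (assumes a matching schema exists):
>>> client.insert_document({"@type": "Person", "name": "Jane"}, commit_msg="Adding Jane")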
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
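Examples
--------
A hypothetical upsert; with create=True the document is inserted if it does not yet exist:
>>> client.replace_document({"@id": "Person/Jane", "@type": "Person", "name": "Jane"}, create=True)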
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
document: str or list of str
Document(s) (as dictionary or DocumentTemplate objects) or id(s) of document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
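Examples
--------
Deleting a document by a hypothetical id:
>>> client.delete_document("Person/Jane", commit_msg="Remove Jane")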
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
Returns
-------
bool
True if the document exists
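Examples
--------
>>> client.has_doc("Person/Jane")  # hypothetical document id
True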
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
Returns
-------
dict
Dictionary containing information
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
If the data version of the query result(s) should be obtained. If True, the method return the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient("http://localhost:6363").query(woql, "updating graph")
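A sketch reading results together with the data version (`woql` is a hypothetical query object):
>>> result, version = client.query(woql, get_data_version=True)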
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
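Examples
--------
>>> client.create_branch("my_new_branch")  # branches off the current head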
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient("http://localhost:6363").push(remote="origin", remote_branch="main", author="admin", message="commit message")
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
        branch : str, optional
            the branch for the rebase
        commit : str, optional
            the commit id for the rebase
rebase_source : str, optional
the source branch for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
            Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when you need to go back after a soft reset).
soft: bool
            Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to a previous commit in the backend and wiping newer commits.
use_path : bool
            Whether the commit given is an id or a path. Defaults to False (the commit is treated as an id).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id to be reset
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_dcoument(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
        >>> result.to_json()
        '{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
if document_id is not None:
if "before_data_version" in request_dict:
                if document_id.startswith("terminusdb:///data"):
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
                raise ValueError("`document_id` can only be used in conjunction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
        You do not need to connect when using the public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
        >>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
        description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
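        Examples
        --------
        A sketch; assumes a database "some_db" exists for the connected user:
        >>> client.get_database("some_db")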
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
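        Examples
        --------
        A sketch; lists metadata records for every accessible database:
        >>> client.get_databases()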
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
    def list_databases(self) -> List[str]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
            list of str
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
has_doc
|
Check if a certain document exists in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
Returns
-------
bool
    if the document exists
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
    def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
    def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
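# Illustrative Patch usage (a sketch; mirrors the diff/patch doctests in
# WOQLClient below):
#   p = Patch(json='{"name": {"@op": "SwapValue", "@before": "Jane", "@after": "Janine"}}')
#   p.update  # -> {'name': 'Janine'}
#   p.before  # -> {'name': 'Jane'}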
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
        URL of the server that this client is connected to.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
        TerminusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Default to "main".
ref: str, None
Ref setting for the client. Default to None.
repo: str
Repo identifier of the database that this client is connected to. Default to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
            return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: str = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, default to be "admin"
db: optional, str
Name of the database connected
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, default to be "root"
user: optional, str
Name of the user, default to be "admin"
use_token: bool
            Use token to connect. If both `jwt_token` and `api_token` are not provided (None), the ENV variable TERMINUSDB_ACCESS_TOKEN will be used as the API token
jwt_token: optional, str
The Bearer JWT token to connect. Default to be None.
        api_token: optional, str
The API token to connect. Default to be None.
branch: optional, str
Branch to be connected, default to be "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, default to be "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
                raise InterfaceError(f"Connection failed: {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
"""Undo connect and close the connection.
        The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
        Commit history - Commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the following format:
{"commit_id":
{"author": "commit_author",
"message": "commit_message",
"timestamp: <datetime object of the timestamp>" }
}
Parameters
----------
max_history: int, optional
            maximum number of commits to return, counting backwards from your current commit. Default is set to 500. It needs to be non-negative; if the input is 0 it will still give the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
        >>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
        [{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 34)},
         {'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'message': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': 'rciy1rfu5foj67ch00ow6f6nnjjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gavin@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': '9egc4h0m36l5rbq1alr1fki6jbfukuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
            TerminusDB currently does not support open transactions, so this method is not applicable. To reset the commit head, use WOQLClient.reset instead.
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
            Database identifier to set in the config.
team : str
            Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
        force: bool
            Whether to force the deletion of the database
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
### TODO: make triples works again
raise InterfaceError("get_triples is temporary not avaliable in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples works again
raise InterfaceError(
"update_triples is temporary not avaliable in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples works again
raise InterfaceError(
"insert_triples is temporary not avaliable in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
            Template for the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
as_list: bool
            If True, the result is returned as a list rather than an iterator.
get_data_version: bool
            If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
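        Examples
        --------
        A sketch; assumes a Person class exists in the connected database's schema:
        >>> client = WOQLClient("https://127.0.0.1:6363/")
        >>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> list(client.query_document({"@type": "Person", "name": "Jane"}))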
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
            IRI id of the document being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
            If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
            Additional boolean flags for retrieving. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
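        Examples
        --------
        A sketch; assumes a document with id "Person/Jane" exists:
        >>> client.get_document("Person/Jane")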
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
            Specific type of the documents being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
            The starting position of the returned results, defaults to 0
count: int or None
            The maximum number of returned results; if None (default), all available results will be returned.
as_list: bool
            If True, the result is returned as a list rather than an iterator.
get_data_version: bool
            If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
            Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
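        Examples
        --------
        A sketch; assumes a Person class exists in the schema:
        >>> people = client.get_documents_by_type("Person", as_list=True)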
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
            The starting position of the returned results, defaults to 0
count: int or None
            The maximum number of returned results; if None (default), all available results will be returned.
as_list: bool
            If True, the result is returned as a list rather than an iterator.
get_data_version: bool
            If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
            Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
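        Examples
        --------
        A sketch; streams every instance document in the connected database:
        >>> for doc in client.get_all_documents():
        ...     print(doc["@id"])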
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_dcoument(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "inference", "instance" or "schema".
        full_replace : bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
            If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
            list of ids of the inserted documents
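        Examples
        --------
        A sketch; inserts a single plain-dict document (assumes a matching Person class in the schema):
        >>> client.insert_document({"@type": "Person", "name": "Jane"}, commit_msg="Adding Jane")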
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
            If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
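        Examples
        --------
        A sketch; assumes "Person/Jane" already exists:
        >>> client.replace_document({"@id": "Person/Jane", "@type": "Person", "name": "Janine"})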
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
            If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
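        Examples
        --------
        A sketch; creates "Person/Jane" if missing, otherwise replaces it:
        >>> client.update_document({"@id": "Person/Jane", "@type": "Person", "name": "Jane"})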
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
        document: str, dict or list
            Document(s) (as dictionary or DocumentTemplate objects) or id(s) of document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
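        Examples
        --------
        A sketch; deletes a document by id:
        >>> client.delete_document("Person/Jane", commit_msg="Removing Jane")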
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
# MASKED: has_doc function (lines 1450-1469)
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
        Returns
-------
dict
Dictionary containing information
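        Examples
        --------
        A sketch; assumes a Person class exists in the schema:
        >>> client.get_class_frame("Person")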
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
        commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
If the data version of the query result(s) should be obtained. If True, the method return the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
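        Examples
        --------
        A sketch; branches off the current head of the connected database:
        >>> client.create_branch("new_branch")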
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
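        Examples
        --------
        A sketch; assumes a branch named "some_branch" already exists:
        >>> client.delete_branch("some_branch")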
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
            remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
            option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
            author = self._author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
            if the client does not connect to a database
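        Examples
        --------
        A sketch; assumes a remote named "origin" has been added to the database:
        >>> client.fetch("origin")
        """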
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
            remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").push(remote="origin", remote_branch="main", author="admin", message="commit message")
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
branch : str, optional
the branch for the rebase
commit : str, optional
the commit id for the rebase
rebase_source : str, optional
the source branch or commit path for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when you need to go back after a soft reset).
soft: bool
Flag indicating whether the reset is soft, that is, referencing a previous commit instead of resetting to it in the backend and wiping newer commits.
use_path : bool
Whether the commit given is an id or a path. Defaults to id (use_path is False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id to be reset
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_document(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json()
'{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_document(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id.startswith("terminusdb:///data"):
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
No connection is needed when using the public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
>>> print(result)
{'@id': 'Person/Jane', '@type': 'Person', 'name': 'Janine'}"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
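# Resolve credentials in order of precedence: HTTP basic auth,
# JWT bearer token, explicit API token, then the
# TERMINUSDB_ACCESS_TOKEN environment variable.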
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
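Examples
--------
A minimal sketch, assuming an existing connection (database id is illustrative):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin")
>>> client.get_database("example_db")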
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
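Examples
--------
A minimal sketch, assuming an existing connection:
>>> client.get_databases()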
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[str]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of str
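Examples
--------
A minimal sketch, assuming an existing connection:
>>> client.list_databases()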
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
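# --- URL helpers below build API endpoint URLs from the connected team/db/repo/branch ---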
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
Returns
-------
bool
True if the document exists
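Examples
--------
A minimal sketch (document id is illustrative):
>>> client.has_doc("Person/Jane")
True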
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
| 1,450
| 1,469
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
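# Recursively walk the patch content, resolving each SwapValue
# operation to its "@after" value.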
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
URL of the server that this client connects to.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
TerminusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Defaults to "main".
ref: str, None
Ref setting for the client. Defaults to None.
repo: str
Repo identifier of the database that this client is connected to. Defaults to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: str = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, defaults to "admin"
db: optional, str
Name of the database connected
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, defaults to "root"
user: optional, str
Name of the user, defaults to "admin"
use_token: bool
Use token to connect. If both `jwt_token` and `api_token` are not provided (None), the ENV variable TERMINUSDB_ACCESS_TOKEN will be used as the API token
jwt_token: optional, str
The Bearer JWT token to connect. Defaults to None.
api_token: optional, str
The API token to connect. Defaults to None.
branch: optional, str
Branch to be connected, defaults to "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, defaults to "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
"""Undo connect and close the connection.
The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
The commit history - commit id, author, message and timestamp of each commit in the current branch, starting from the current commit and ordered backwards in time - is returned as a list of dictionaries, each in the following format:
{"commit": "<commit_id>",
"author": "<commit_author>",
"message": "<commit_message>",
"timestamp": <datetime object of the timestamp>}
Parameters
----------
max_history: int, optional
maximum number of commits returned, counting backwards from your current commit. Default is 500. It needs to be non-negative; if the input is 0 it will still return the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 34)},
{'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'message': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'rciy1rfu5foj67ch00ow6f6nnjjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gavin@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9egc4h0m36l5rbq1alr1fki6jbfukuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
Since TerminusDB currently does not support open transactions, this method is not applicable. To reset the commit head, use WOQLClient.reset
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
Database identifier to set in the config.
team : str
Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
return self.db
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/branch/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force: bool
If True, force the deletion of the database (default False)
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
### TODO: make triples works again
raise InterfaceError("get_triples is temporary not avaliable in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples works again
raise InterfaceError(
"update_triples is temporary not avaliable in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples works again
raise InterfaceError(
"insert_triples is temporary not avaliable in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
Template for the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If True, the data version of the document(s) is obtained and the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
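Examples
--------
A minimal sketch, matching documents against a template (keys and values are illustrative):
>>> client.query_document({"@type": "Person", "name": "Jane"}, as_list=True)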
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
IRI id of the document being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
If True, the data version of the document(s) is obtained and the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
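Examples
--------
A minimal sketch (document id is illustrative):
>>> client.get_document("Person/Jane")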
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
Specific type of the documents being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, defaults to 0
count: int or None
The maximum number of returned results; if None (default) all available results are returned.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If True, the version of the document(s) is obtained and the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
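Examples
--------
A minimal sketch (type name is illustrative):
>>> client.get_documents_by_type("Person", as_list=True)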
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, defaults to 0
count: int or None
The maximum number of returned results; if None (default) all available results are returned.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If True, the version of the document(s) is obtained and the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
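Examples
--------
A minimal sketch, fetching the first 10 documents as a list:
>>> client.get_all_documents(count=10, as_list=True)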
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_document(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "inference", "instance" or "schema".
full_replace : bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
list of ids of the inserted documents
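Examples
--------
A minimal sketch inserting a single document (type and values are illustrative):
>>> client.insert_document({"@type": "Person", "name": "Jane"}, commit_msg="Adding Jane")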
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_document(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in the document list needs to be a dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
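Examples
--------
A minimal sketch replacing a single document (id and values are illustrative):
>>> client.replace_document({"@id": "Person/Jane", "@type": "Person", "name": "Janine"}, commit_msg="Renaming Jane")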
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_document(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
document: str or list of str
Document(s) (as dictionary or DocumentTemplate objects) or id(s) of document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
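Examples
--------
A minimal sketch deleting a document by id (id is illustrative):
>>> client.delete_document("Person/Jane", commit_msg="Removing Jane")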
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
Returns
-------
bool
True if the document exists
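Examples
--------
A minimal sketch (document id is illustrative):
>>> client.has_doc("Person/Jane")
True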
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
Returns
-------
dict
Dictionary containing information
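Examples
--------
A minimal sketch (class name is illustrative):
>>> client.get_class_frame("Person")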
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
If True, the data version of the query result(s) is obtained and the method returns the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
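Examples
--------
A minimal sketch, assuming a local server and an existing connection (ids are illustrative):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.create_branch("feature_branch")
>>> client.create_branch("scratch", empty=True)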
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
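Examples
--------
A minimal sketch, assuming the branch exists (ids are illustrative):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.delete_branch("feature_branch")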
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient("http://localhost:6363").push(remote="origin", remote_branch="main", author="admin", message="commit message")
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
branch : str, optional
the branch for the rebase
commit : str, optional
the commit id for the rebase
rebase_source : str, optional
the source branch for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when you need to go back after a soft reset).
soft: bool
Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to it in the backend and wiping out newer commits.
use_path : bool
Whether the commit given is an id or a path. Defaults to id (use_path is False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id to be reset
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_dcoument(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json()
'{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id[:len("terminusdb:///data")] == "terminusdb:///data":
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjunction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
Do not connect when using public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
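Examples
--------
An illustrative sketch ("some_db" is an arbitrary database id):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin")
>>> client.get_database("some_db")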
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
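Examples
--------
An illustrative sketch, assuming a client connected to a server:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin")
>>> client.get_databases()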
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[str]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of str
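Examples
--------
An illustrative sketch, assuming a client connected to a server:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin")
>>> client.list_databases()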
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
return base
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
get_class_frame
|
Get the frame of the class with the given class_name. Provides information about all the available properties of that class.
Parameters
----------
class_name: str
Name of the class
Returns
-------
dict
Dictionary containing information
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
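"""A patch object representing a diff between two documents or versions.
An illustrative sketch (the JSON assumes the SwapValue format produced by WOQLClient.diff):
>>> patch = Patch(json='{"name" : {"@op" : "SwapValue", "@before" : "Jane", "@after" : "Janine"}}')
>>> patch.before
{'name': 'Jane'}
>>> patch.update
{'name': 'Janine'}
"""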
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
URL of the server that this client connected.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
TerminusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Default to "main".
ref: str, None
Ref setting for the client. Default to None.
repo: str
Repo identifier of the database that this client is connected to. Default to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: str = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, default to be "admin"
db: optional, str
Name of the database connected
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, default to be "root"
user: optional, str
Name of the user, default to be "admin"
use_token: bool
Use token to connect. If both `jwt_token` and `api_token` are not provided (None), the environment variable TERMINUSDB_ACCESS_TOKEN will be used as the API token
jwt_token: optional, str
The Bearer JWT token to connect. Default to be None.
api_token: optional, str
The API token to connect. Default to be None.
branch: optional, str
Branch to be connected, default to be "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, default to be "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
"""Undo connect and close the connection.
The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
Commit history - commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the following format:
{"commit_id":
{"author": "commit_author",
"message": "commit_message",
"timestamp": <datetime object of the timestamp>}
}
Parameters
----------
max_history: int, optional
maximum number of commits to return, counting backwards from your current commit. Default is 500. It needs to be non-negative; if the input is 0 it will still return the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363"
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 34)},
{'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'message': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'rciy1rfu5foj67ch00ow6f6nnjjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gavin@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9egc4h0m36l5rbq1alr1fki6jbfukuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
Since TerminusDB currently does not support open transactions, this method is not applicable. To reset the commit head, use WOQLClient.reset
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
Database identifier to set in the config.
team : str
Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force: bool
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
### TODO: make triples work again
raise InterfaceError("get_triples is temporarily not available in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples work again
raise InterfaceError(
"update_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples work again
raise InterfaceError(
"insert_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
Template for the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
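Examples
--------
An illustrative sketch, assuming a connected database whose schema defines a Person class:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> documents = client.query_document({"@type": "Person"}, as_list=True)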
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
IRI id of the document to retrieve
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
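Examples
--------
An illustrative sketch ("Person/Jane" is an arbitrary document id):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.get_document("Person/Jane")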
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
Specific type of the documents to retrieve
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, default 0
count: int or None
The maximum number of returned results; if None (default) it will return all of the available results.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
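Examples
--------
An illustrative sketch, assuming the schema defines a Person class:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> people = client.get_documents_by_type("Person", count=10, as_list=True)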
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, default 0
count: int or None
The maximum number of returned results; if None (default) it will return all of the available results.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
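Examples
--------
An illustrative sketch, assuming a connected database:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> documents = client.get_all_documents(count=10, as_list=True)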
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_dcoument(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "inference", "instance" or "schema".
full_replace : bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never` it will never compress the data.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
list of ids of the inserted documents
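Examples
--------
An illustrative sketch, assuming the schema defines a Person class:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.insert_document({"@type": "Person", "name": "Jane"}, commit_msg="Adding Jane")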
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never` it will never compress the data.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
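Examples
--------
An illustrative sketch, assuming the document "Person/Jane" already exists:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.replace_document({"@id": "Person/Jane", "@type": "Person", "name": "Janine"}, commit_msg="Update Jane")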
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never` it will never compress the data.
Raises
------
InterfaceError
if the client does not connect to a database
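Examples
--------
An illustrative sketch; the document is created if it does not exist yet:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.update_document({"@id": "Person/Jane", "@type": "Person", "name": "Jane"}, commit_msg="Upsert Jane")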
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
document: str or list of str
Document(s) (as dictionary or DocumentTemplate objects) or id(s) of document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
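Examples
--------
An illustrative sketch ("Person/Jane" is an arbitrary document id):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.delete_document("Person/Jane", commit_msg="Removing Jane")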
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
Returns
-------
bool
True if the document exists
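Examples
--------
An illustrative sketch ("Person/Jane" is an arbitrary document id):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.has_doc("Person/Jane")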
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
# MASKED: get_class_frame function (lines 1471-1492)
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
If the data version of the query result(s) should be obtained. If True, the method returns the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient("http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict or str
If the query makes changes, returns "Commit successfully made."; otherwise returns the result dictionary.
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
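Examples
--------
An illustrative sketch, assuming a connected client ("my_new_branch" is an arbitrary branch name):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.create_branch("my_new_branch")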
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
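        Examples
        --------
        A minimal usage sketch (assumes a branch "dev" exists):
        >>> client.delete_branch("dev")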
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
            remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
            option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
            author = self._author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
            if the client does not connect to a database
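        Returns
        -------
        dict
        Examples
        --------
        A minimal usage sketch (assumes a remote named "origin" has been added to the database):
        >>> client.fetch("origin")
        """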
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
            remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
            option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").push(remote="origin", remote_branch = "main", author = "admin", message = "commit message"})
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
        branch : str, optional
            the branch for the rebase
        commit : str, optional
            the commit id for the rebase
        rebase_source : str, optional
            the source branch for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
            Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when you need to go back after a soft reset).
soft: bool
            Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to a previous commit in the backend and wiping newer commits.
use_path : bool
            Whether the commit given is an id or a path. Defaults to id (use_path is False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id to be reset
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_dcoument(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json = '{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id[:len("terminusdb:///data")] == "terminusdb:///data":
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
Do not connect when using public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "ValueSwap", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
        description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
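        Examples
        --------
        A minimal usage sketch (assumes a database "some_db" exists under the connected team):
        >>> client.get_database("some_db")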
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[Dict]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
        list of str
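        Examples
        --------
        A minimal usage sketch (the output depends on the databases the user can access):
        >>> client.list_databases()
        ['some_db', 'another_db']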
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
        Returns
-------
dict
Dictionary containing information
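        Examples
        --------
        A minimal usage sketch (assumes a "Person" class exists in the schema):
        >>> client.get_class_frame("Person")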
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
| 1,471
| 1,492
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
    def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
    def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
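# A minimal usage sketch for Patch (assuming the "SwapValue" patch format produced by diff):
# >>> p = Patch(json='{"name": {"@op": "SwapValue", "@before": "Jane", "@after": "Janine"}}')
# >>> p.before
# {'name': 'Jane'}
# >>> p.update
# {'name': 'Janine'}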
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
        URL of the server that this client is connected to.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
        TerminusDB user that this client is using. "admin" for local dbs.
    branch: str
        Branch of the database that this client is connected to. Defaults to "main".
    ref: str, None
        Ref setting for the client. Defaults to None.
    repo: str
        Repo identifier of the database that this client is connected to. Defaults to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
            return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
        remote_auth: Optional[str] = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
        team: str
            Name of the team, defaults to "admin"
        db: optional, str
            Name of the database connected
        remote_auth: optional, str
            Remote Auth setting
        key: optional, str
            API key for connecting, defaults to "root"
        user: optional, str
            Name of the user, defaults to "admin"
        use_token: bool
            Use token to connect. If neither `jwt_token` nor `api_token` is provided (None), the environment variable TERMINUSDB_ACCESS_TOKEN will be used as the API token
        jwt_token: optional, str
            The Bearer JWT token to connect. Defaults to None.
        api_token: optional, str
            The API token to connect. Defaults to None.
        branch: optional, str
            Branch to be connected, defaults to "main"
        ref: optional, str
            Ref setting
        repo: optional, str
            Local or remote repo, defaults to "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
"""Undo connect and close the connection.
        The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
        Commit history - Commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the following format:
        {"commit_id":
            {"author": "commit_author",
             "message": "commit_message",
             "timestamp": <datetime object of the timestamp>}
}
Parameters
----------
max_history: int, optional
            maximum number of commits to return, counting backwards from your current commit. Default is set to 500. It needs to be non-negative; if the input is 0 it will still return the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363"
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.da
tetime(2020, 9, 3, 15, 29, 34)}, {'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'm
essage': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': 'rciy1rfu5foj67ch00ow6f6n
njjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15,
29, 33)}, {'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', '
timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gav
in@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)
}, {'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account
object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': '9egc4h0m36l5rbq1alr1fki6jbfu
kuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timstamp': datetime.datetime(2020, 9, 3, 15,
29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
Since TerminusDB currently does not support open transactions. This method is not applicable to it's usage. To reset commit head, use WOQLClient.reset
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
        dbid : str
            Database identifier to set in the config.
        team : str
            Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
            ResourceType.BRANCH: f"{base}{self.repo}/branch/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force: bool
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
### TODO: make triples works again
raise InterfaceError("get_triples is temporary not avaliable in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples works again
raise InterfaceError(
"update_triples is temporary not avaliable in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples works again
raise InterfaceError(
"insert_triples is temporary not avaliable in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
        document_template : dict
            Template for the document that is being retrieved
        graph_type : str, optional
            Graph type, either "instance" or "schema".
        skip: int
            The starting position of the returned results, default to be 0
        count: int or None
            The maximum number of returned results; if None (default) all available results are returned.
        as_list: bool
            If True, the result is returned as a list rather than an iterator.
        get_data_version: bool
            If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
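        Examples
        --------
        A minimal usage sketch (assumes a "Person" class with a "name" property exists in the schema):
        >>> client.query_document({"@type": "Person", "name": "Jane"}, as_list=True)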
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
        iri_id : str
            IRI id of the document being retrieved
        graph_type : str, optional
            Graph type, either "instance" or "schema".
        get_data_version: bool
            If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
        kwargs:
            Additional boolean flags for retrieving. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
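        Examples
        --------
        A minimal usage sketch (assumes a document "Person/Jane" exists):
        >>> client.get_document("Person/Jane")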
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
        doc_type : str
            Specific type of the documents being retrieved
        graph_type : str, optional
            Graph type, either "instance" or "schema".
        skip: int
            The starting position of the returned results, default to be 0
        count: int or None
            The maximum number of returned results; if None (default) all available results are returned.
        as_list: bool
            If True, the result is returned as a list rather than an iterator.
        get_data_version: bool
            If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
        kwargs:
            Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
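        Examples
        --------
        A minimal usage sketch (assumes a "Person" class exists in the schema):
        >>> list(client.get_documents_by_type("Person", count=10))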
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
        skip: int
            The starting position of the returned results, default to be 0
        count: int or None
            The maximum number of returned results; if None (default) all available results are returned.
        as_list: bool
            If True, the result is returned as a list rather than an iterator.
        get_data_version: bool
            If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
        kwargs:
            Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
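        Examples
        --------
        A minimal usage sketch (the output depends on the connected database):
        >>> client.get_all_documents(count=10, as_list=True)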
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_dcoument(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
        graph_type : str
            Graph type, either "instance" or "schema".
        full_replace : bool
            If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
        commit_msg : str
            Commit message.
        last_data_version : str
            Last version before the update, used to check if the document has been changed unknowingly
        compress : str or int
            If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
            list of ids of the inserted documents
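        Examples
        --------
        A minimal usage sketch (assumes a "Person" class already exists in the schema):
        >>> client.insert_document({"@type": "Person", "name": "Jane"}, commit_msg="Adding Jane")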
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
        compress : str or int
            If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
        compress : str or int
            If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
        document: str, dict, or list
            Document(s) (as dictionary or DocumentTemplate objects) or id(s) of document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
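        Examples
        --------
        A minimal usage sketch (assumes a document "Person/Jane" exists):
        >>> client.delete_document("Person/Jane", commit_msg="Removing Jane")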
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
        Returns
        -------
        bool
            True if the document exists
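        Examples
        --------
        A minimal usage sketch (assumes a document "Person/Jane" was inserted earlier):
        >>> client.has_doc("Person/Jane")
        True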
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
        Returns
-------
dict
Dictionary containing information
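        Examples
        --------
        A minimal usage sketch (assumes a "Person" class exists in the schema):
        >>> client.get_class_frame("Person")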
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
        commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
If the data version of the query result(s) should be obtained. If True, the method returns the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
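# --- Usage sketch (illustrative, not part of the client) -------------------
# Running a WOQL query and iterating over its bindings. The server URL, the
# database name and the Person class are placeholder assumptions.
client = WOQLClient("http://localhost:6363")
client.connect(user="admin", key="root", team="admin", db="example_db")
woql = WOQLQuery().triple("v:subject", "rdf:type", "@schema:Person")
result = client.query(woql)
for binding in result.get("bindings", []):
    print(binding["subject"])  # each binding maps query variables to values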
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
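# --- Usage sketch (illustrative, not part of the client) -------------------
# A simple branch lifecycle; "my_feature" and "example_db" are hypothetical.
client = WOQLClient("http://localhost:6363")
client.connect(user="admin", key="root", team="admin", db="example_db")
client.create_branch("my_feature")   # branch off the current branch
client.branch = "my_feature"         # point the client at the new branch
# ... commit changes on the branch here ...
client.branch = "main"
client.delete_branch("my_feature")   # remove the branch when done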
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").push(remote="origin", remote_branch="main", author="admin", message="commit message")
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
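# --- Usage sketch (illustrative, not part of the client) -------------------
# Syncing with a remote repository. Assumes the database already has a remote
# named "origin"; all names are placeholders.
client = WOQLClient("http://localhost:6363")
client.connect(user="admin", key="root", team="admin", db="example_db")
client.fetch("origin")                              # fetch remote commits first
client.pull(remote="origin", remote_branch="main")  # merge remote changes in
client.push(remote="origin", remote_branch="main",
            author="admin", message="pushing local changes")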
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
branch : str, optional
the branch for the rebase
commit : str, optional
the commit id for the rebase
rebase_source : str, optional
the source branch or commit path for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when you need to go back after a soft reset).
soft: bool
Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to it in the backend and wiping newer commits.
use_path : bool
Whether the given commit is an id or a path. Defaults to id (use_path is False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
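# --- Usage sketch (illustrative, not part of the client) -------------------
# Soft vs. hard reset on a connected client. A soft reset only re-points the
# client (nothing changes in the backend) and can be undone; a hard reset
# rewrites the branch HEAD on the server. The commit id is a placeholder.
client = WOQLClient("http://localhost:6363")
client.connect(user="admin", key="root", team="admin", db="example_db")
client.reset("234980523ffaf93", soft=True)  # client now reads from this commit
client.reset()                              # undo the soft reset (newest commit)
client.reset("234980523ffaf93")             # hard reset: newer commits are wiped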
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id to be reset
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_dcoument(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json()
'{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id.startswith("terminusdb:///data"):
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
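# --- Usage sketch (illustrative, not part of the client) -------------------
# Diffing two versions of a document. The documents and names below are made
# up; against the public API you could skip the connect step.
client = WOQLClient("http://localhost:6363")
client.connect(user="admin", key="root", team="admin", db="example_db")
jane_before = {"@id": "Person/Jane", "@type": "Person", "name": "Jane"}
jane_after = {"@id": "Person/Jane", "@type": "Person", "name": "Janine"}
patch = client.diff(jane_before, jane_after)
print(patch.to_json())
# Passing strings instead of documents diffs two data versions / commit ids,
# optionally narrowed to a single document (placeholder ids shown here):
# patch = client.diff("branch:main", "branch:dev",
#                     document_id="terminusdb:///data/Person/Jane")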
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
Do not connect when using public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
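# --- Usage sketch (illustrative, not part of the client) -------------------
# Applying a Patch to a document locally; this returns the "after" state and
# commits nothing to the graph. Names below are placeholders.
client = WOQLClient("http://localhost:6363")
client.connect(user="admin", key="root", team="admin", db="example_db")
patch_obj = Patch(
    json='{"name": {"@op": "SwapValue", "@before": "Jane", "@after": "Janine"}}'
)
before_doc = {"@id": "Person/Jane", "@type": "Person", "name": "Jane"}
after_doc = client.patch(before_doc, patch_obj)
print(after_doc)  # {"@id": "Person/Jane", "@type": "Person", "name": "Janine"}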
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[str]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of str
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
reset
|
Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning the branch is reset to that commit in the backend and newer commits will be wiped out. If `soft` is True, the client will only reference that commit and can be reset back to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when you need to go back after a soft reset).
soft: bool
Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to it in the backend and wiping newer commits.
use_path : bool
Whether the given commit is an id or a path. Defaults to id (use_path is False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
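# --- Usage sketch (illustrative, not part of the client) -------------------
# Patch objects can be inspected entirely offline: `before` reconstructs the
# pre-change values and `update` the post-change ones.
_example = Patch(
    json='{"name": {"@op": "SwapValue", "@before": "Jane", "@after": "Janine"}}'
)
assert _example.before == {"name": "Jane"}
assert _example.update == {"name": "Janine"}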
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
URL of the server that this client is connected to.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
TerminusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Defaults to "main".
ref: str, None
Ref setting for the client. Defaults to None.
repo: str
Repo identifier of the database that this client is connected to. Defaults to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: str = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, defaults to "admin"
db: optional, str
Name of the database to connect to
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, defaults to "root"
user: optional, str
Name of the user, defaults to "admin"
use_token: bool
Use a token to connect. If both `jwt_token` and `api_token` are not provided (None), the environment variable TERMINUSDB_ACCESS_TOKEN will be used as the API token
jwt_token: optional, str
The Bearer JWT token to connect. Defaults to None.
api_token: optional, str
The API token to connect. Defaults to None.
branch: optional, str
Branch to be connected, defaults to "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, defaults to "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
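# --- Usage sketch (illustrative, not part of the client) -------------------
# Two common ways to connect: a local server with basic auth, and a remote
# server with an API token read from TERMINUSDB_ACCESS_TOKEN. The URLs, team
# and database names are placeholders.
local = WOQLClient("http://localhost:6363")
local.connect(user="admin", key="root", team="admin", db="example_db")
remote = WOQLClient("https://cloud.terminusdb.com/my_team")
remote.connect(team="my_team", db="example_db", use_token=True)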
def close(self) -> None:
"""Undo connect and close the connection.
The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again.
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
Commit history - commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time - will be returned as a list of dictionaries in the following format:
{"commit_id":
{"author": "commit_author",
"message": "commit_message",
"timestamp": <datetime object of the timestamp>}
}
Parameters
----------
max_history: int, optional
maximum number of commits to return, counting backwards from your current commit. Default is set to 500. It needs to be non-negative; if the input is 0 it will still return the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363"
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 34)},
{'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'message': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'rciy1rfu5foj67ch00ow6f6nnjjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gavin@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9egc4h0m36l5rbq1alr1fki6jbfukuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
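# --- Usage sketch (illustrative, not part of the client) -------------------
# Printing a short commit log on a connected client; 10 is an arbitrary limit
# and the connection details are placeholders.
client = WOQLClient("http://localhost:6363")
client.connect(user="admin", key="root", team="admin", db="example_db")
for entry in client.get_commit_history(max_history=10):
    print(f"{entry['commit'][:7]} {entry['timestamp']:%Y-%m-%d} "
          f"{entry['author']}: {entry['message']}")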
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
Since TerminusDB currently does not support open transactions, this method is not applicable. To reset the commit head, use WOQLClient.reset
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
Database identifier to set in the config.
team : str
Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/branch/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
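# --- Usage sketch (illustrative, not part of the client) -------------------
# Creating a database; label, description and prefixes are optional (defaults
# apply when `prefixes` is omitted). All names and IRIs are hypothetical.
client = WOQLClient("http://localhost:6363")
client.connect(user="admin", key="root", team="admin")
client.create_database(
    "example_db",
    label="Example DB",
    description="Scratch database for trying out the client",
    prefixes={"@base": "http://example.org/data/", "@schema": "http://example.org/schema#"},
)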
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force: bool
If True, the deletion will be forced.
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
### TODO: make triples work again
raise InterfaceError("get_triples is temporarily not available in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples work again
raise InterfaceError(
"update_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples work again
raise InterfaceError(
"insert_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
Template for the document being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
IRI id of the document being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieval. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
Specific type of the documents being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, defaults to 0
count: int or None
The maximum number of returned results; if None (default) all available results are returned.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieval. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, defaults to 0
count: int or None
The maximum number of returned results; if None (default) all available results are returned.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieval. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
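# --- Usage sketch (illustrative, not part of the client) -------------------
# Paging through all documents with skip/count on a connected client;
# page_size is an arbitrary choice and the connection details placeholders.
client = WOQLClient("http://localhost:6363")
client.connect(user="admin", key="root", team="admin", db="example_db")
page_size = 100
skip = 0
while True:
    page = client.get_all_documents(skip=skip, count=page_size, as_list=True)
    for doc in page:
        print(doc.get("@id"))
    if len(page) < page_size:  # a short page means we reached the end
        break
    skip += page_size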
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_dcoument(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "inference", "instance" or "schema".
full_replace : bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
list of ids of the inserted documents
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
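# --- Usage sketch (illustrative, not part of the client) -------------------
# Bulk-inserting documents. Payloads larger than the `compress` threshold
# (1024 bytes by default) are gzipped; compress=0 always compresses and
# compress="never" disables compression. Names are placeholders and assume a
# schema with a Person class.
client = WOQLClient("http://localhost:6363")
client.connect(user="admin", key="root", team="admin", db="example_db")
docs = [{"@type": "Person", "name": f"Person {i}"} for i in range(50)]
ids = client.insert_document(docs, commit_msg="bulk insert", compress=0)
print(ids)  # ids of the inserted documents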
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
document: str, dict or list
Document(s) (as dictionary or DocumentTemplate objects) or id(s) of document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
Returns
-------
bool
True if the document exists
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
Returns
-------
dict
Dictionary containing information
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
If the data version of the query result(s) should be obtained. If True, the method returns the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
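# Usage sketch (illustrative only; the triple pattern and database are
# assumptions): run a read-only query and inspect its bindings.
#   client = WOQLClient("http://localhost:6363")
#   client.connect(user="admin", key="root", team="admin", db="some_db")
#   query = WOQLQuery().triple("v:subject", "v:predicate", "v:object")
#   bindings = client.query(query).get("bindings")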
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
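# Usage sketch (illustrative only; branch names are assumptions):
#   client.create_branch("dev")                   # branch off the current head
#   client.create_branch("scratch", empty=True)   # branch with no starting commit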
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self.author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").push(remote="origin", remote_branch = "main", author = "admin", message = "commit message"})
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
branch : str, optional
the branch for the rebase
commit : str, optional
the commit id for the rebase
rebase_source : str, optional
the source branch or commit for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
# MASKED: reset function (lines 1827-1882)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id to be reset
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
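# Usage sketch (illustrative only): squash and hard-reset in one call, which
# is equivalent to squashing first and resetting to the returned commit.
#   commit_id = client.squash("Squash all commits", reset=True)
#   # equivalent to:
#   commit_id = client.squash("Squash all commits")
#   client.reset(commit_id)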
def _convert_diff_dcoument(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json()
'{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id.startswith("terminusdb:///data"):
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
Do not connect when using public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
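# Usage sketch (illustrative only; the documents are assumptions): a diff
# yields a Patch which, applied to the "before" object, reproduces "after".
#   before = {"@id": "Person/Jane", "@type": "Person", "name": "Jane"}
#   after = {"@id": "Person/Jane", "@type": "Person", "name": "Janine"}
#   patch_obj = client.diff(before, after)
#   assert client.patch(before, patch_obj) == after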
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
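# Credential resolution order: HTTPS basic auth (user/key) first, then a
# JWT bearer token, then an explicit API token, and finally the
# TERMINUSDB_ACCESS_TOKEN environment variable.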
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[Dict]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
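# Usage sketch (illustrative only): list_databases returns just the ids,
# get_databases the full metadata records.
#   client.connect(user="admin", key="root", team="admin")
#   names = client.list_databases()    # e.g. ["some_db", ...]
#   records = client.get_databases()   # list of metadata dicts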
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
return base
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when you need to go back after a soft reset).
soft: bool
Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to it in the backend and wiping newer commits.
use_path : bool
Whether the commit given is a path rather than an id. Defaults to using an id (use_path=False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
| 1,827
| 1,882
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
URL of the server that this client connected.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
TerminiusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Default to "main".
ref: str, None
Ref setting for the client. Default to None.
repo: str
Repo identifier of the database that this client is connected to. Default to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: str = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, default to be "admin"
db: optional, str
Name of the database connected
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, default to be "root"
user: optional, str
Name of the user, default to be "admin"
use_token: bool
Use token to connect. If neither `jwt_token` nor `api_token` is provided, the environment variable TERMINUSDB_ACCESS_TOKEN will be used as the API token
jwt_token: optional, str
The Bearer JWT token to connect. Default to be None.
api_token: optional, str
The API token to connect. Default to be None.
branch: optional, str
Branch to be connected, default to be "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, default to be "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
"""Undo connect and close the connection.
The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
Commit history - Commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the following format:
{"commit_id":
{"author": "commit_author",
"message": "commit_message",
"timestamp: <datetime object of the timestamp>" }
}
Parameters
----------
max_history: int, optional
maximum number of commits to return, counting backwards from your current commit. Default is set to 500. It needs to be non-negative; if the input is 0 it will still return the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363"
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 34)},
{'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'message': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'rciy1rfu5foj67ch00ow6f6nnjjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gavin@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9egc4h0m36l5rbq1alr1fki6jbfukuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
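# Usage sketch (illustrative only): collect the names of all branches.
#   branches = client.get_all_branches()
#   names = [branch["name"] for branch in branches]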
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
Since TerminusDB currently does not support open transactions, this method is not applicable. To reset the commit head, use WOQLClient.reset
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
Database identifier to set in the config.
team : str
Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force: bool
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
### TODO: make triples works again
raise InterfaceError("get_triples is temporary not avaliable in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples works again
raise InterfaceError(
"update_triples is temporary not avaliable in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples works again
raise InterfaceError(
"insert_triples is temporary not avaliable in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
Template for the documents being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
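# Usage sketch (illustrative only; the template is an assumption): match all
# documents of type Person whose name is "Jane".
#   template = {"@type": "Person", "name": "Jane"}
#   matches = client.query_document(template, as_list=True)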
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
IRI id of the document being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
Specific type of the documents being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, defaults to 0
count: int or None
The maximum number of returned results; if None (default), all available results are returned.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, defaults to 0
count: int or None
The maximum number of returned results; if None (default), all available results are returned.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
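# Usage sketch (illustrative only): page through documents ten at a time, or
# fetch everything together with its data version.
#   page = client.get_all_documents(skip=0, count=10, as_list=True)
#   docs, version = client.get_all_documents(as_list=True, get_data_version=True)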
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_dcoument(self, document, graph_type):
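# For a list insert, gather every "@capture" label the documents define and
# every "@ref" they mention, so a reference to an object that was never
# submitted fails early with a clear error.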
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "inference", "instance" or "schema".
full_replace:: bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, size of the data larger than this (in bytes) will be compress with gzip in the request (assume encoding as UTF-8, 0 = always compress). If it is `never` it will never compress the data.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
list of ids of the inserted documents
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
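# Usage sketch (illustrative only; the schema class and document below are
# assumptions):
#   client.insert_document(
#       {"@id": "Person", "@type": "Class", "name": "xsd:string"},
#       graph_type="schema",
#       commit_msg="Add Person class",
#   )
#   ids = client.insert_document({"@type": "Person", "name": "Jane"},
#                                commit_msg="Add Jane")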
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
        last_data_version : str
            Last version before the update, used to check if the document has been changed unexpectedly
        compress : str or int
            If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (encoding assumed to be UTF-8; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
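    # Usage sketch (illustrative): update_document is replace_document with
    # create=True, so missing documents are created rather than rejected.
    # "Person/Jane" and the fields below are placeholder assumptions:
    #
    #   client.update_document(
    #       {"@id": "Person/Jane", "@type": "Person", "name": "Janine"},
    #       commit_msg="Rename Jane",
    #   )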
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
        document: str or list of str or dict
            Document(s) (as dictionaries or DocumentTemplate objects) or id(s) of document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
        last_data_version : str
            Last version before the deletion, used to check if the document has been changed unexpectedly
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
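    # Usage sketch (illustrative; "Person/Jane" is a placeholder id): ids,
    # dicts with an "@id", and DocumentTemplate objects are all accepted.
    #
    #   client.delete_document("Person/Jane", commit_msg="Remove Jane")
    #   client.delete_document(["Person/Jim", "Person/Joe"], commit_msg="Cleanup")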
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
        Returns
        -------
        bool
            True if the document exists, False otherwise
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
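    # Note: has_doc retrieves all documents and scans their ids, so its cost
    # grows with the size of the database. Usage sketch (placeholder id):
    #
    #   if client.has_doc("Person/Jane"):
    #       doc = client.get_document("Person/Jane")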
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
        Returns
-------
dict
Dictionary containing information
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
        commit_msg : str
            A message that will be written to the commit log to describe the change
        get_data_version: bool
            If True, the data version of the query result(s) is also obtained and the method returns the result and the version as a tuple.
        last_data_version : str
            Last version before the update, used to check if the document has been changed unexpectedly
        file_dict: **deprecated**
            File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
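    # Usage sketch (illustrative; variable names are placeholders): a
    # read-only query returns a dict with a "bindings" list, while a query
    # that inserts or deletes returns a confirmation string.
    #
    #   q = WOQLQuery().triple("v:subject", "v:predicate", "v:object")
    #   result = client.query(q)
    #   for binding in result.get("bindings", []):
    #       print(binding)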
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
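    # Usage sketch (illustrative; "dev" is a placeholder branch name):
    #
    #   client.create_branch("dev")   # branched from the current head
    #   client.branch = "dev"         # subsequent operations target "dev"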
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
            remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
            option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self.author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
            remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
            option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").push(remote="origin", remote_branch = "main", author = "admin", message = "commit message"})
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
        branch : str, optional
            the branch for the rebase
        commit : str, optional
            the commit id for the rebase
        rebase_source : str, optional
            the source branch for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when need to go back after a soft reset).
        soft: bool
            Flag indicating if the reset is soft, that is, only referencing a previous commit instead of resetting to it in the backend and wiping out newer commits.
        use_path : bool
            Whether the commit given is an id or a path. Defaults to False (an id).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id to be reset
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_dcoument(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json = '{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id[:len("terminusdb:///data")] == "terminusdb:///data":
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
        Connecting to a database is not required when using the public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "ValueSwap", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
        description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
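    # Auth selection sketch (illustrative): basic auth is used when a key is
    # set without use_token; otherwise JWT, then API token, then the
    # TERMINUSDB_ACCESS_TOKEN environment variable. For example:
    #
    #   os.environ["TERMINUSDB_ACCESS_TOKEN"] = "<token>"   # placeholder
    #   client.connect(team="my_team", db="my_db", use_token=True)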
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[Dict]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
        list of str
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
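    # Usage sketch (illustrative; the database names are placeholders):
    #
    #   client.connect(user="admin", key="root", team="admin")
    #   client.list_databases()   # e.g. ['bank_balance_example', 'example_db']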
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
return base
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
squash
|
Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id to be reset
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
    def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
    def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
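# Patch usage sketch (illustrative; the JSON payload mirrors the SwapValue
# operation handled by the `update` and `before` properties above):
#
#   p = Patch(json='{"name": {"@op": "SwapValue", "@before": "Jane", "@after": "Janine"}}')
#   p.before   # -> {'name': 'Jane'}
#   p.update   # -> {'name': 'Janine'}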
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
    server_url: str
        URL of the server that this client is connected to.
    api: str
        API endpoint for this client.
    team: str
        Team that this client is using. "admin" for local dbs.
    db: str
        Database that this client is connected to.
    user: str
        TerminusDB user that this client is using. "admin" for local dbs.
    branch: str
        Branch of the database that this client is connected to. Defaults to "main".
    ref: str, None
        Ref setting for the client. Defaults to None.
    repo: str
        Repo identifier of the database that this client is connected to. Defaults to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
            return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: str = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
        team: str
            Name of the team, defaults to "admin"
        db: optional, str
            Name of the database to connect to
        remote_auth: optional, str
            Remote Auth setting
        key: optional, str
            API key for connecting, defaults to "root"
        user: optional, str
            Name of the user, defaults to "admin"
        use_token: bool
            Use a token to connect. If neither `jwt_token` nor `api_token` is provided (None), the ENV variable TERMINUSDB_ACCESS_TOKEN will be used as the API token
        jwt_token: optional, str
            The Bearer JWT token to connect. Defaults to None.
        api_token: optional, str
            The API token to connect. Defaults to None.
        branch: optional, str
            Branch to connect to, defaults to "main"
        ref: optional, str
            Ref setting
        repo: optional, str
            Local or remote repo, defaults to "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
"""Undo connect and close the connection.
        The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
        Commit history - commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the following format:
        {"commit_id":
         {"author": "commit_author",
          "message": "commit_message",
          "timestamp": <datetime object of the timestamp>}
        }
Parameters
----------
max_history: int, optional
            maximum number of commits to return, counting backwards from your current commit. Default is 500. It needs to be non-negative; if the input is 0 it will still return the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363"
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
        [{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 34)},
         {'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'message': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': 'rciy1rfu5foj67ch00ow6f6nnjjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gavin@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': '9egc4h0m36l5rbq1alr1fki6jbfukuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
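    # Usage sketch (illustrative): each returned item is a Branch document
    # from the _commits graph; the "name" field is assumed here.
    #
    #   branches = client.get_all_branches()
    #   names = [b.get("name") for b in branches]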
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
            Since TerminusDB currently does not support open transactions, this method is not applicable. To reset the commit head, use WOQLClient.reset
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
        dbid : str
            Database identifier to set in the config.
        team : str
            Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force: bool
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
### TODO: make triples works again
raise InterfaceError("get_triples is temporary not avaliable in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples works again
raise InterfaceError(
"update_triples is temporary not avaliable in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples works again
raise InterfaceError(
"insert_triples is temporary not avaliable in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
            Template for the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
        as_list: bool
            If True, the result is returned as a list rather than an iterator.
        get_data_version: bool
            If True, the data version of the document(s) is also obtained and the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
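    # Usage sketch (illustrative; "Person" and "name" are placeholder schema
    # assumptions): the template is matched against stored documents.
    #
    #   matches = client.query_document(
    #       {"@type": "Person", "name": "Jane"},
    #       as_list=True,
    #   )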
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
            IRI id of the document to retrieve
graph_type : str, optional
Graph type, either "instance" or "schema".
        get_data_version: bool
            If True, the data version of the document(s) is also obtained and the method returns the result and the version as a tuple.
        kwargs:
            Additional boolean flags for retrieval. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
        doc_type : str
            Specific type of the documents to retrieve
        graph_type : str, optional
            Graph type, either "instance" or "schema".
        skip: int
            The starting position of the returned results, defaults to 0
        count: int or None
            The maximum number of returned results; if None (default) all available results are returned.
        as_list: bool
            If True, the result is returned as a list rather than an iterator.
        get_data_version: bool
            If True, the data version of the document(s) is also obtained and the method returns the result and the version as a tuple.
        kwargs:
            Additional boolean flags for retrieval. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
        skip: int
            The starting position of the returned results, defaults to 0
        count: int or None
            The maximum number of returned results; if None (default) all available results are returned.
        as_list: bool
            If True, the result is returned as a list rather than an iterator.
        get_data_version: bool
            If True, the data version of the document(s) is also obtained and the method returns the result and the version as a tuple.
        kwargs:
            Additional boolean flags for retrieval. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_dcoument(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
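    # Capture/reference sketch (illustrative; "Person" and the capture key
    # are placeholders): every "@ref" in a batch must point at an "@capture"
    # in the same batch, otherwise _convert_dcoument raises ValueError.
    #
    #   docs = [
    #       {"@type": "Person", "@capture": "C1", "name": "Jane"},
    #       {"@type": "Person", "name": "Jim", "friend": {"@ref": "C1"}},
    #   ]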
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
        graph_type : str
            Graph type, either "instance" or "schema".
        full_replace : bool
            If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
        commit_msg : str
            Commit message.
        last_data_version : str
            Last version before the update, used to check if the document has been changed unexpectedly
        compress : str or int
            If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (encoding assumed to be UTF-8; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
            list of ids of the inserted documents
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
        last_data_version : str
            Last version before the update, used to check if the document has been changed unexpectedly
        compress : str or int
            If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (encoding assumed to be UTF-8; 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
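Examples
--------
A minimal sketch; assumes a document with the illustrative id "Person/Jane" already exists:
>>> client.replace_document({"@id": "Person/Jane", "@type": "Person", "name": "Janine"}, commit_msg="Renaming Jane")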
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (encoding is assumed to be UTF-8; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
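Examples
--------
A minimal sketch; the `Person` document is illustrative. Unlike `replace_document`, this creates the document if it does not exist:
>>> client.update_document({"@id": "Person/Jane", "@type": "Person", "name": "Jane"}, commit_msg="Upserting Jane")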
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
document: str, dict, or list
Document(s) (as dictionary or DocumentTemplate objects) or id(s) of document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
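Examples
--------
A minimal sketch; the document id is illustrative:
>>> client.delete_document("Person/Jane", commit_msg="Deleting Jane")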
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
Returns
-------
bool
True if the document exists
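Examples
--------
A minimal sketch; the document id is illustrative:
>>> client.has_doc("Person/Jane")
True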
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
Returns
-------
dict
Dictionary containing information
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
If the data version of the query result(s) should be obtained. If True, the method returns the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
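Examples
--------
A minimal sketch; the branch name is illustrative, and switching the client to the new branch via the `branch` property is shown as well:
>>> client.create_branch("experimental")
>>> client.branch = "experimental"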
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").push(remote="origin", remote_branch = "main", author = "admin", message = "commit message"})
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
branch : str, optional
the branch for the rebase
commit : str, optional
the commit id for the rebase
rebase_source : str, optional
the source branch for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when you need to go back after a soft reset).
soft: bool
Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to it in the backend and wiping newer commits.
use_path : bool
Whether the given commit is an id or a path. Defaults to id (use_path=False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
# MASKED: squash function (lines 1918-1973)
def _convert_diff_dcoument(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json = '{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id[:len("terminusdb:///data")] == "terminusdb:///data":
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
No database connection is needed when using the public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "ValueSwap", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
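Examples
--------
A minimal sketch; the database id is illustrative:
>>> client.get_database("flight_data")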
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[str]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of str
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id of the new squash commit (can be passed to `reset`)
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
| 1,918
| 1,973
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
URL of the server that this client is connected to.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
TerminusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Defaults to "main".
ref: str, None
Ref setting for the client. Defaults to None.
repo: str
Repo identifier of the database that this client is connected to. Defaults to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: str = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, defaults to "admin"
db: optional, str
Name of the database to connect to
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, defaults to "root"
user: optional, str
Name of the user, defaults to "admin"
use_token: bool
Use a token to connect. If neither `jwt_token` nor `api_token` is provided (None), the ENV variable TERMINUSDB_ACCESS_TOKEN will be used as the API token
jwt_token: optional, str
The Bearer JWT token to connect. Defaults to None.
api_token: optional, str
The API token to connect. Defaults to None.
branch: optional, str
Branch to be connected, defaults to "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, defaults to "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
"""Undo connect and close the connection.
The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
Commit history - commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time - will be returned in a dictionary in the following format:
{"commit_id":
{"author": "commit_author",
"message": "commit_message",
"timestamp": <datetime object of the timestamp>}
}
Parameters
----------
max_history: int, optional
maximum number of commits to return, counting backwards from your current commit. Defaults to 500. It needs to be non-negative; if the input is 0 it will still return the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363"
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 34)},
{'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'message': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'rciy1rfu5foj67ch00ow6f6nnjjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gavin@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9egc4h0m36l5rbq1alr1fki6jbfukuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
Since TerminusDB currently does not support open transactions, this method is not applicable. To reset the commit head, use WOQLClient.reset
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
Database identifier to set in the config.
team : str
Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force: bool
Whether to force the deletion of the database.
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
### TODO: make triples work again
raise InterfaceError("get_triples is temporarily not available in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples work again
raise InterfaceError(
"update_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples work again
raise InterfaceError(
"insert_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
Template for the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, defaults to 0
count: int or None
The maximum number of returned results; if None (default), all available results are returned.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
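Examples
--------
A minimal sketch; the `Person` template is illustrative:
>>> template = {"@type": "Person", "name": "Jane"}
>>> matches = client.query_document(template, count=10, as_list=True)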
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
IRI id of the document being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
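Examples
--------
A minimal sketch; the document id is illustrative:
>>> doc = client.get_document("Person/Jane")
>>> doc["@id"]
'Person/Jane'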
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
Specific type of the documents being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, defaults to 0
count: int or None
The maximum number of returned results; if None (default), all available results are returned.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
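Examples
--------
A minimal sketch; the `Person` type is illustrative:
>>> people = client.get_documents_by_type("Person", count=100, as_list=True)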
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, defaults to 0
count: int or None
The maximum number of returned results; if None (default), all available results are returned.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
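Examples
--------
A minimal sketch; iterates over the schema graph and prints class ids:
>>> for doc in client.get_all_documents(graph_type="schema"):
...     print(doc.get("@id"))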
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_dcoument(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> Optional[list]:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "inference", "instance" or "schema".
full_replace : bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (encoding is assumed to be UTF-8; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
list of ids of the inserted documents
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
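    # A minimal usage sketch (names are illustrative; assumes a connected
    # client and a "Person" class in the schema):
    #
    #   client = WOQLClient("http://localhost:6363")
    #   client.connect(db="example_db")
    #   ids = client.insert_document({"@type": "Person", "name": "Jane"},
    #                                commit_msg="add Jane")
    #
    # Payloads larger than `compress` bytes (default 1024) are gzipped
    # before being POSTed.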
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
            Document(s) to be replaced.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
            If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (encoding is assumed to be UTF-8; 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
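    # A minimal usage sketch (illustrative document; assumes a connected
    # client). Passing create=True inserts the document if it does not
    # exist yet, which is what update_document below relies on:
    #
    #   client.replace_document({"@id": "Person/Jane", "@type": "Person",
    #                            "name": "Janine"},
    #                           commit_msg="rename Jane", create=True)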
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
            If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (encoding is assumed to be UTF-8; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
        document: str or dict, or list of str/dict
            Document(s) (as dictionaries or DocumentTemplate objects) or id(s) of the document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
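    # A minimal usage sketch (illustrative id; assumes a connected client):
    #
    #   client.delete_document("Person/Jane", commit_msg="remove Jane")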
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
        Returns
        -------
        bool
            True if the document exists
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
        Returns
        -------
        dict
            Dictionary containing the class frame information
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
        commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
            If the data version of the query result(s) should be obtained. If True, the method returns the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
            remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
            option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self.author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
            remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
            option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").push(remote="origin", remote_branch = "main", author = "admin", message = "commit message"})
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
        branch : str, optional
            the branch for the rebase
        commit : str, optional
            the commit id for the rebase
        rebase_source : str, optional
            the source branch or commit for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
            Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when you need to go back after a soft reset).
soft: bool
            Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to it in the backend and wiping newer commits.
use_path : bool
            Whether the commit given is an id or a path. Defaults to using an id (use_path is False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id to be reset
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_dcoument(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
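        Parameters
        ----------
        before : str, dict, list of dict, WOQLSchema or DocumentTemplate(s)
            Document(s) before the change, or a data version / commit ID as a string.
        after : str, dict, list of dict, WOQLSchema or DocumentTemplate(s)
            Document(s) after the change, or a data version / commit ID as a string.
        document_id : str, optional
            Id of the document to diff; can only be used together with a data version or commit ID as `before`, and must start with `terminusdb:///data`.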
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
        >>> result.to_json()
        '{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
if document_id is not None:
if "before_data_version" in request_dict:
                if document_id.startswith("terminusdb:///data"):
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
        No connection is needed when using the public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
        >>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
        >>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
        >>> print(result)
        '{ "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
        description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[Dict]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
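    # For illustration (values are assumptions, not fixed by the library):
    # with team="admin", db="mydb", repo="local" and branch="main",
    # _branch_base("document") builds "<api>/document/admin/mydb/local/branch/main".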
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
diff
|
Perform a diff on two sets of documents, resulting in a Patch object.
No connection is needed when using the public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json()
'{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
    def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
    def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
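# A minimal usage sketch for Patch (the JSON shape follows the SwapValue
# examples in the diff/patch docstrings of WOQLClient below):
#
#   p = Patch(json='{"name": {"@op": "SwapValue", "@before": "Jane", "@after": "Janine"}}')
#   p.before  # -> {'name': 'Jane'}
#   p.update  # -> {'name': 'Janine'}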
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
        URL of the server that this client is connected to.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
        TerminusDB user that this client is using. "admin" for local dbs.
branch: str
        Branch of the database that this client is connected to. Defaults to "main".
    ref: str, None
        Ref setting for the client. Defaults to None.
    repo: str
        Repo identifier of the database that this client is connected to. Defaults to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
            return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: str = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
        team: str
            Name of the team, defaults to "admin"
        db: optional, str
            Name of the database connected
        remote_auth: optional, str
            Remote Auth setting
        key: optional, str
            API key for connecting, defaults to "root"
        user: optional, str
            Name of the user, defaults to "admin"
        use_token: bool
            Use token to connect. If both `jwt_token` and `api_token` are not provided (None), the environment variable TERMINUSDB_ACCESS_TOKEN will be used as the API token
        jwt_token: optional, str
            The Bearer JWT token to connect. Defaults to None.
        api_token: optional, str
            The API token to connect. Defaults to None.
        branch: optional, str
            Branch to be connected, defaults to "main"
        ref: optional, str
            Ref setting
        repo: optional, str
            Local or remote repo, defaults to "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
"""Undo connect and close the connection.
        The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
        Commit history - Commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the following format:
        {"commit_id":
            {"author": "commit_author",
             "message": "commit_message",
             "timestamp": <datetime object of the timestamp>}
        }
Parameters
----------
max_history: int, optional
            maximum number of commits to return, counting backwards from your current commit. Defaults to 500. It needs to be non-negative; if the input is 0 it will still return the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363"
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
        [{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 34)},
         {'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'message': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': 'rciy1rfu5foj67ch00ow6f6nnjjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gavin@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
         {'commit': '9egc4h0m36l5rbq1alr1fki6jbfukuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
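    # For illustration: with step=2 the f-string above builds the WOQL path
    # pattern "parent{2,2}", i.e. the commit exactly two parents back from
    # the current head.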
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
            Since TerminusDB currently does not support open transactions, this method is not applicable. To reset the commit head, use WOQLClient.reset instead.
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
            Database identifier to set in the config.
        team : str
            Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
        force: bool
            If True, force the deletion of the database.
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
        ### TODO: make triples work again
        raise InterfaceError("get_triples is temporarily not available in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
        ### TODO: make triples work again
        raise InterfaceError(
            "update_triples is temporarily not available in this version"
        )
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
        ### TODO: make triples work again
        raise InterfaceError(
            "insert_triples is temporarily not available in this version"
        )
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
            Template for the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
        as_list: bool
            If True, the result is returned as a list rather than an iterator.
        get_data_version: bool
            If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
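    # A minimal usage sketch (illustrative template; assumes a connected
    # client and a "Person" class in the schema):
    #
    #   for doc in client.query_document({"@type": "Person", "name": "Jane"}):
    #       print(doc)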
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
            IRI id of the document to retrieve
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
            If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retriving. Currently avaliable: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
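    # A minimal usage sketch (illustrative id; assumes a connected client):
    #
    #   doc = client.get_document("Person/Jane")
    #   doc, version = client.get_document("Person/Jane", get_data_version=True)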
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
        doc_type : str
            Specific type of the documents to retrieve
        graph_type : str, optional
            Graph type, either "instance" or "schema".
        skip: int
            The starting position of the returned results, defaults to 0
        count: int or None
            The maximum number of returned results; if None (default) all available results are returned.
        as_list: bool
            If True, the result is returned as a list rather than an iterator.
        get_data_version: bool
            If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
        kwargs:
            Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
        skip: int
            The starting position of the returned results, defaults to 0
        count: int or None
            The maximum number of returned results; if None (default) all available results are returned.
        as_list: bool
            If True, the result is returned as a list rather than an iterator.
        get_data_version: bool
            If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
        kwargs:
            Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
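    # A minimal usage sketch (illustrative type name; assumes a connected
    # client). Both methods above stream results unless as_list=True:
    #
    #   people = client.get_documents_by_type("Person", as_list=True)
    #   everything = client.get_all_documents(as_list=True)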
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_dcoument(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "inference", "instance" or "schema".
        full_replace : bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (encoding is assumed to be UTF-8; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
list of ids of the inserted documents
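Examples
--------
A minimal usage sketch (assumes a running local server and an existing database "some_db" whose schema already defines a hypothetical `Person` class):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.insert_document({"@type": "Person", "name": "Jane"}, commit_msg="Adding Jane")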
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
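# Serialize once so the payload size can be checked against the `compress`
# threshold (an integer number of bytes; "never" disables compression);
# the body is gzipped only when it exceeds that threshold.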
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> Optional[list]:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (encoding is assumed to be UTF-8; 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
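Examples
--------
A sketch under the same assumptions as `insert_document` (hypothetical "some_db" database and `Person` class); with `create=True` the document is inserted if it does not exist yet:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.replace_document({"@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"}, commit_msg="Renaming Jane", create=True)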
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (encoding is assumed to be UTF-8; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
document: str or dict or list
Document(s) (as dict or DocumentTemplate objects) or id(s) of the document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
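Examples
--------
An illustrative sketch (the document id "Person/Jane" is hypothetical):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.delete_document("Person/Jane", commit_msg="Removing Jane")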
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
Returns
-------
bool
True if the document exists, False otherwise
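Examples
--------
An illustrative sketch (the document id "Person/Jane" is hypothetical):
>>> client.has_doc("Person/Jane")
True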
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
Returns
-------
dict
Dictionary containing information
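Examples
--------
An illustrative sketch (assumes the schema defines a hypothetical `Person` class):
>>> client.get_class_frame("Person")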
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
If the data version of the query result(s) should be obtained. If True, the method returns the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
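Examples
--------
A sketch: create a "dev" branch from the current branch, then point the client at it (`branch` is a settable property):
>>> client.create_branch("dev")
>>> client.branch = "dev"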
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self.author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").push(remote="origin", remote_branch = "main", author = "admin", message = "commit message")
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
branch : str, optional
the branch for the rebase
commit : str, optional
the commit id for the rebase
rebase_source : str, optional
the source branch for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when you need to go back after a soft reset).
soft: bool
Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to it in the backend and wiping newer commits.
use_path : bool
Whether the commit given is an id or a path. Defaults to id (use_path is False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id of the newly created squash commit (can be used with reset)
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_dcoument(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
# MASKED: diff function (lines 1985-2052)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
No connection is needed when using the public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[str]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of str
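Examples
--------
An illustrative sketch (the returned names depend on the server):
>>> client.list_databases()
['database1', 'database2']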
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json()
'{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id.startswith("terminusdb:///data"):
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
| 1,985
| 2,052
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
URL of the server that this client is connected to.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
TerminusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Defaults to "main".
ref: str, None
Ref setting for the client. Defaults to None.
repo: str
Repo identifier of the database that this client is connected to. Defaults to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: Optional[str] = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, default to be "admin"
db: optional, str
Name of the database to connect to
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, default to be "root"
user: optional, str
Name of the user, default to be "admin"
use_token: bool
Use token to connect. If neither `jwt_token` nor `api_token` is provided (None), the ENV variable TERMINUSDB_ACCESS_TOKEN will be used as the API token
jwt_token: optional, str
The Bearer JWT token to connect. Default to be None.
api_token: optional, str
The API token to connect. Default to be None.
branch: optional, str
Branch to be connected, default to be "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, default to be "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
"""Undo connect and close the connection.
The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
Commit history - the commit id, author, message and timestamp of each commit in the current branch, ordered backwards in time from the current commit, will be returned as a list of dictionaries in the following format:
{"commit": "<commit_id>",
"author": "<commit_author>",
"message": "<commit_message>",
"timestamp": <datetime object of the timestamp>}
Parameters
----------
max_history: int, optional
maximum number of commits to return, counting backwards from your current commit. Default is set to 500. It needs to be non-negative; if the input is 0 it will still return the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363"
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 34)},
{'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'message': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'rciy1rfu5foj67ch00ow6f6nnjjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gavin@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9egc4h0m36l5rbq1alr1fki6jbfukuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
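# Follow exactly `step` parent links back from the current branch head and
# return the identifier of that ancestor commit.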
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
Since TerminusDB currently does not support open transactions, this method is not applicable. To reset the commit head, use WOQLClient.reset instead.
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
Database identifier to set in the config.
team : str
Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force: bool
If True, force the deletion of the database.
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
### TODO: make triples works again
raise InterfaceError("get_triples is temporary not avaliable in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples works again
raise InterfaceError(
"update_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples works again
raise InterfaceError(
"insert_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
Template for the document being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
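Examples
--------
An illustrative sketch (the `Person` document shape is hypothetical):
>>> client.query_document({"@type": "Person", "name": "Jane"}, as_list=True)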
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
IRI id of the document being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
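Examples
--------
An illustrative sketch (the document id "Person/Jane" is hypothetical):
>>> client.get_document("Person/Jane")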
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
Specific type of the documents being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, defaults to 0
count: int or None
The maximum number of returned results; if None (default) it will return all of the available results.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
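Examples
--------
An illustrative sketch (the `Person` type is hypothetical):
>>> client.get_documents_by_type("Person", as_list=True)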
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, defaults to 0
count: int or None
The maximum number of returned results; if None (default) it will return all of the available results.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
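Examples
--------
An illustrative sketch; `as_list=True` materializes the stream:
>>> docs = client.get_all_documents(as_list=True)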
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_dcoument(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "inference", "instance" or "schema".
full_replace:: bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
list of ids of the inserted documents
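Examples
--------
A minimal usage sketch, assuming a "Person" class already exists in the schema of "some_db":
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.insert_document({"@type": "Person", "name": "Jane"}, commit_msg="Adding Jane")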
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
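Examples
--------
A minimal usage sketch, assuming a document "Person/Jane" already exists in "some_db":
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.replace_document({"@id": "Person/Jane", "@type": "Person", "name": "Janine"}, commit_msg="Renaming Jane")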
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
document: str or dict or list
Document(s) (as dictionary or DocumentTemplate objects) or id(s) of document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
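Examples
--------
A minimal usage sketch, assuming a document "Person/Jane" exists in "some_db":
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.delete_document("Person/Jane", commit_msg="Removing Jane")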
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
Returns
-------
bool
True if the document exists
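Examples
--------
A minimal usage sketch, assuming a document "Person/Jane" was inserted earlier:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.has_doc("Person/Jane")
True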
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
Returns
-------
dict
Dictionary containing information
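Examples
--------
A minimal usage sketch, assuming a "Person" class exists in the schema:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> frame = client.get_class_frame("Person")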
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
If the data version of the query result(s) should be obtained. If True, the method returns the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
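Examples
--------
A minimal usage sketch on a connected database:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.create_branch("my_new_branch")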
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
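Examples
--------
A minimal usage sketch, assuming a branch "my_new_branch" was created earlier:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.delete_branch("my_new_branch")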
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database
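Examples
--------
A minimal usage sketch, assuming a remote named "origin" is configured for the database:
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.fetch("origin")
"""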
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").push(remote="origin", remote_branch="main", author="admin", message="commit message")
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
branch : str, optional
the branch for the rebase
commit : str, optional
the commit id for the rebase
rebase_source : str, optional
the source branch or commit for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when you need to go back after a soft reset).
soft: bool
Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to it in the backend and wiping newer commits.
use_path : bool
Whether the commit given is an id or a path. Defaults to id (use_path is False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id to be reset
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_dcoument(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json()
'{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id.startswith("terminusdb:///data"):
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
No connection is needed when using the public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[str]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of str
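Examples
--------
A minimal usage sketch on a connected server (the returned names are placeholders):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin")
>>> client.list_databases()
['<db_name_1>', '<db_name_2>']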
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
patch
|
Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
No connection is needed when using the public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"}'
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
URL of the server that this client is connected to.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
TerminusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Default to "main".
ref: str, None
Ref setting for the client. Default to None.
repo: str
Repo identifier of the database that this client is connected to. Default to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: str = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, default to be "admin"
db: optional, str
Name of the database connected
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, default to be "root"
user: optional, str
Name of the user, default to be "admin"
use_token: bool
Use token to connect. If both `jwt_token` and `api_token` are not provided (None), the environment variable TERMINUSDB_ACCESS_TOKEN will be used as the API token
jwt_token: optional, str
The Bearer JWT token to connect. Default to be None.
api_token: optional, str
The API token to connect. Default to be None.
branch: optional, str
Branch to be connected, default to be "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, default to be "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
"""Undo connect and close the connection.
The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
Commit history - commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time - will be returned as a list of dictionaries in the following format:
{"commit": "<commit_id>",
"author": "<commit_author>",
"message": "<commit_message>",
"timestamp": <datetime object of the timestamp>}
Parameters
----------
max_history: int, optional
maximum number of commits to return, counting backwards from your current commit. Default is 500. It needs to be non-negative; if the input is 0 it will still return the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363"
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 34)},
{'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'message': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'rciy1rfu5foj67ch00ow6f6nnjjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gavin@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9egc4h0m36l5rbq1alr1fki6jbfukuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
TerminusDB currently does not support open transactions, so this method is not applicable. To reset the commit head, use WOQLClient.reset instead.
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
Database identifier to set in the config.
team : str
Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/branch/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force: bool
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
### TODO: make triples work again
raise InterfaceError("get_triples is temporarily not available in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples work again
raise InterfaceError(
"update_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples work again
raise InterfaceError(
"insert_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
Template for the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If True, the method returns the result and the data version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
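Examples
--------
A minimal usage sketch, assuming "Person" documents exist in "some_db":
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> people = client.query_document({"@type": "Person", "name": "Jane"}, as_list=True)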
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
Iri id for the docuemnt that is retriving
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method return the result and the version as a tuple.
kwargs:
Additional boolean flags for retriving. Currently avaliable: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
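Examples
--------
A minimal usage sketch, assuming a document "Person/Jane" exists in "some_db":
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> doc = client.get_document("Person/Jane")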
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
Specific type of the documents to retrieve
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, defaults to 0
count: int or None
The maximum number of returned results; if None (default) all of the available results will be returned.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If True, the method returns the result and the data version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
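Examples
--------
A minimal usage sketch, assuming a "Person" class exists in the schema of "some_db":
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> people = client.get_documents_by_type("Person", count=10, as_list=True)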
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting posiion of the returning results, default to be 0
count: int or None
The maximum number of returned result, if None (default) it will return all of the avalible result.
as_list: bool
If the result returned as list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method return the result and the version as a tuple.
kwargs:
Additional boolean flags for retriving. Currently avaliable: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
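Examples
--------
A minimal usage sketch, assuming a local server and an existing database named "some_db":
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> documents = client.get_all_documents(count=10, as_list=True)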
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_dcoument(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "inference", "instance" or "schema".
full_replace : bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
list of ids of the inserted documents
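Examples
--------
Minimal usage sketch (assumes a connected client and a schema with a hypothetical "Person" class):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.insert_document({"@type": "Person", "name": "Jane"}, commit_msg="Adding Jane")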
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
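Examples
--------
Minimal usage sketch (assumes a connected client and an existing document "Person/Jane"):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.replace_document({"@id": "Person/Jane", "@type": "Person", "name": "Janine"}, commit_msg="Renaming Jane")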
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
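Examples
--------
Minimal usage sketch (assumes a connected client; the document is inserted if it is not already there):
>>> client.update_document({"@id": "Person/Jane", "@type": "Person", "name": "Jane"}, commit_msg="Upserting Jane")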
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
document: str or dict or list
Document(s) (as dictionaries or DocumentTemplate objects) or id(s) of the document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
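Examples
--------
Minimal usage sketch (assumes a connected client and an existing document "Person/Jane"):
>>> client.delete_document("Person/Jane", commit_msg="Deleting Jane")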
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
Returns
-------
bool
True if the document exists
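Examples
--------
Minimal usage sketch (assumes a connected client):
>>> client.has_doc("Person/Jane")
True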
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
Returns
-------
dict
Dictionary containing information
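Examples
--------
Minimal usage sketch (assumes a connected client and a hypothetical "Person" class in the schema):
>>> frame = client.get_class_frame("Person")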
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
If the data version of the query result(s) should be obtained. If True, the method returns the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
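Examples
--------
Minimal usage sketch (assumes a connected client):
>>> client.create_branch("my_new_branch")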
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
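Examples
--------
Minimal usage sketch (assumes a connected client and an existing branch "my_new_branch"):
>>> client.delete_branch("my_new_branch")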
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self.author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").push(remote="origin", remote_branch = "main", author = "admin", message = "commit message"})
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
branch : str, optional
the branch for the rebase
commit : str, optional
the commit id for the rebase
rebase_source : str, optional
the source branch or commit path for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when need to go back after a soft reset).
soft: bool
Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to a previous commit in the backend and wiping newer commits.
use_path : bool
Whether the commit given is an id or a path. Defaults to id (use_path is False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id of the new squash commit (can be used with reset)
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_dcoument(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json = '{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id[:len("terminusdb:///data")] == "terminusdb:///data":
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
Do not connect when using the public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[str]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of str
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
Do not connect when using public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "ValueSwap", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
| 2,054
| 2,105
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
def update(self):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
def before(self):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
URL of the server that this client connected.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
TerminiusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Default to "main".
ref: str, None
Ref setting for the client. Default to None.
repo: str
Repo identifier of the database that this client is connected to. Default to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: str = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, default to be "admin"
db: optional, str
Name of the database connected
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, default to be "root"
user: optional, str
Name of the user, default to be "admin"
use_token: bool
Use token to connect. If both `jwt_token` and `api_token` is not provided (None), then it will use the ENV variable TERMINUSDB_ACCESS_TOKEN to connect as the API token
jwt_token: optional, str
The Bearer JWT token to connect. Default to be None.
api_token: optional, strs
The API token to connect. Default to be None.
branch: optional, str
Branch to be connected, default to be "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, default to be "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
"""Undo connect and close the connection.
The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is call again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
Commit history - Commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the follow format:
{"commit_id":
{"author": "commit_author",
"message": "commit_message",
"timestamp: <datetime object of the timestamp>" }
}
Parameters
----------
max_history: int, optional
maximum number of commit that would return, counting backwards from your current commit. Default is set to 500. It need to be nop-negitive, if input is 0 it will still give the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363"
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.da
tetime(2020, 9, 3, 15, 29, 34)}, {'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'm
essage': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': 'rciy1rfu5foj67ch00ow6f6n
njjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15,
29, 33)}, {'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', '
timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gav
in@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)
}, {'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account
object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}, {'commit': '9egc4h0m36l5rbq1alr1fki6jbfu
kuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timstamp': datetime.datetime(2020, 9, 3, 15,
29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
Since TerminusDB currently does not support open transactions. This method is not applicable to it's usage. To reset commit head, use WOQLClient.reset
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
Database identifer to set in the config.
team : str
Team identifer to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force: bool
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
### TODO: make triples works again
raise InterfaceError("get_triples is temporary not avaliable in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples works again
raise InterfaceError(
"update_triples is temporary not avaliable in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples works again
raise InterfaceError(
"insert_triples is temporary not avaliable in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
Template for the document that is being retrived
graph_type : str, optional
Graph type, either "instance" or "schema".
as_list: bool
If the result returned as list rather than an iterator.
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method return the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
Iri id for the docuemnt that is retriving
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method return the result and the version as a tuple.
kwargs:
Additional boolean flags for retriving. Currently avaliable: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
Specific type for the docuemnts that is retriving
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting posiion of the returning results, default to be 0
count: int or None
The maximum number of returned result, if None (default) it will return all of the avalible result.
as_list: bool
If the result returned as list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method return the result and the version as a tuple.
kwargs:
Additional boolean flags for retriving. Currently avaliable: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting posiion of the returning results, default to be 0
count: int or None
The maximum number of returned result, if None (default) it will return all of the avalible result.
as_list: bool
If the result returned as list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method return the result and the version as a tuple.
kwargs:
Additional boolean flags for retriving. Currently avaliable: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
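Examples
--------
A minimal sketch (assumes a connected client):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> docs = client.get_all_documents(count=10, as_list=True)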
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_document(self, document, graph_type):
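# Normalizes the input document(s) into plain dicts for the HTTP payload.
# For lists, also verifies that every "@ref" back-reference has a matching
# "@capture" id so the server can resolve the references.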
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "inference", "instance" or "schema".
full_replace : bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed without your knowledge
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
list of ids of the inserted documents
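Examples
--------
A minimal sketch (assumes a connected client and that "Person" is defined in the schema):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.insert_document({"@type": "Person", "name": "Jane"}, commit_msg="Adding Jane")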
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_document(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be replaced.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed without your knowledge
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
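Examples
--------
A minimal sketch (assumes a connected client and an existing "Person/Jane" document):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.replace_document({"@id": "Person/Jane", "@type": "Person", "name": "Janine"}, commit_msg="Rename Jane")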
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_document(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed without your knowledge
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
document: str or dict or list
Document(s) (as dictionaries or DocumentTemplate objects) or id(s) of the document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed without your knowledge
Raises
------
InterfaceError
if the client does not connect to a database
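Examples
--------
A minimal sketch (assumes a connected client and an existing document with this id):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.delete_document("Person/Jane", commit_msg="Removing Jane")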
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
Returns
-------
bool
True if the document exists
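Examples
--------
A minimal sketch (assumes a connected client and that "Person/Jane" exists):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.has_doc("Person/Jane")
True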
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
Returns
-------
dict
Dictionary containing information
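Examples
--------
A minimal sketch (assumes a connected client and a schema containing a "Person" class):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> frame = client.get_class_frame("Person")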
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
Whether to obtain the data version of the query result(s). If True, the method returns the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed without your knowledge
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
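Examples
--------
A minimal sketch (assumes a connected client):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.create_branch("dev")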
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
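Examples
--------
A minimal sketch (assumes a connected client and an existing "dev" branch):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.delete_branch("dev")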
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self.author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").push(remote="origin", remote_branch = "main", author = "admin", message = "commit message"})
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
branch : str, optional
the branch for the rebase
commit : str, optional
the commit id for the rebase
rebase_source : str, optional
the source branch for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when you need to go back after a soft reset).
soft: bool
Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to it in the backend and wiping newer commits.
use_path : bool
Whether the commit given is an id or a path. Defaults to id (use_path is False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
id of the new squash commit (can be used to reset the branch)
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_document(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json()
'{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_document(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id.startswith("terminusdb:///data"):
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
Do not connect when using the public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
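Examples
--------
A minimal sketch (assumes a client connected to a server):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin")
>>> client.get_databases()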
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[str]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of str
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
clonedb
|
Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
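# Example (a minimal sketch): building a Patch from a JSON diff and reading
# the derived views. Assumes a patch containing a single SwapValue operation.
# >>> p = Patch(json='{"name": {"@op": "SwapValue", "@before": "Jane", "@after": "Janine"}}')
# >>> p.update
# {'name': 'Janine'}
# >>> p.before
# {'name': 'Jane'}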
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
URL of the server that this client is connected to.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
TerminusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Default to "main".
ref: str, None
Ref setting for the client. Default to None.
repo: str
Repo identifier of the database that this client is connected to. Default to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: str = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, default to be "admin"
db: optional, str
Name of the database connected
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, default to be "root"
user: optional, str
Name of the user, default to be "admin"
use_token: bool
Use token to connect. If both `jwt_token` and `api_token` are not provided (None), the ENV variable TERMINUSDB_ACCESS_TOKEN will be used as the API token to connect
jwt_token: optional, str
The Bearer JWT token to connect. Default to be None.
api_token: optional, str
The API token to connect. Default to be None.
branch: optional, str
Branch to be connected, default to be "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, default to be "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
"""Undo connect and close the connection.
The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
Commit history - Commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the following format:
{"commit_id":
{"author": "commit_author",
"message": "commit_message",
"timestamp": <datetime object of the timestamp>}
}
Parameters
----------
max_history: int, optional
maximum number of commits to return, counting backwards from your current commit. Default is 500. It needs to be non-negative; if the input is 0 it will still give the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363"
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 34)},
{'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'message': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'rciy1rfu5foj67ch00ow6f6nnjjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gavin@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9egc4h0m36l5rbq1alr1fki6jbfukuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
TerminusDB currently does not support open transactions, so this method is not applicable. To reset the commit head, use WOQLClient.reset instead
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
Database identifier to set in the config.
team : str
Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force: bool
Force the deletion of the database
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
### TODO: make triples work again
raise InterfaceError("get_triples is temporarily not available in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples work again
raise InterfaceError(
"update_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples work again
raise InterfaceError(
"insert_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
Template for the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, default is 0
count: int or None
The maximum number of results to return; if None (default) all of the available results are returned.
as_list: bool
If True, the results are returned as a list rather than as an iterator.
get_data_version: bool
Whether to obtain the data version of the document(s). If True, the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
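Examples
--------
A minimal sketch (assumes a connected client and documents of type "Person"):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> matches = client.query_document({"@type": "Person", "name": "Jane"}, as_list=True)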
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
Iri id for the docuemnt that is retriving
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method return the result and the version as a tuple.
kwargs:
Additional boolean flags for retriving. Currently avaliable: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
Specific type for the docuemnts that is retriving
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting posiion of the returning results, default to be 0
count: int or None
The maximum number of returned result, if None (default) it will return all of the avalible result.
as_list: bool
If the result returned as list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method return the result and the version as a tuple.
kwargs:
Additional boolean flags for retriving. Currently avaliable: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
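Examples
--------
Illustrative sketch, assuming a hypothetical "Person" class in the schema:
>>> people = client.get_documents_by_type("Person", count=10, as_list=True)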
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting posiion of the returning results, default to be 0
count: int or None
The maximum number of returned result, if None (default) it will return all of the avalible result.
as_list: bool
If the result returned as list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method return the result and the version as a tuple.
kwargs:
Additional boolean flags for retriving. Currently avaliable: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
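Examples
--------
Illustrative sketch, paging through documents 10 at a time on a connected client:
>>> first_page = client.get_all_documents(skip=0, count=10, as_list=True)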
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_dcoument(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "inference", "instance" or "schema".
full_replace:: bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, size of the data larger than this (in bytes) will be compress with gzip in the request (assume encoding as UTF-8, 0 = always compress). If it is `never` it will never compress the data.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
list of ids of the inseted docuemnts
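Examples
--------
Illustrative sketch; the "Person" document is a hypothetical example:
>>> client.insert_document({"@type": "Person", "name": "Jane"}, commit_msg="Adding Jane")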
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in the document list needs to be a dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace the context, use `full_replace` or `replace_document`; skipping the context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If an integer is given, data larger than this size (in bytes, assuming UTF-8 encoding) will be compressed with gzip in the request (0 = always compress). If `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client is not connected to a database
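Examples
--------
Illustrative sketch; the "Person/Jane" document is a hypothetical example:
>>> client.replace_document({"@id": "Person/Jane", "@type": "Person", "name": "Janine"}, commit_msg="Renaming Jane")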
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, size of the data larger than this (in bytes) will be compress with gzip in the request (assume encoding as UTF-8, 0 = always compress). If it is `never` it will never compress the data.
Raises
------
InterfaceError
if the client does not connect to a database
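Examples
--------
Illustrative sketch; the document is a hypothetical example and will be created if it does not exist:
>>> client.update_document({"@id": "Person/Jane", "@type": "Person", "name": "Jane"}, commit_msg="Upserting Jane")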
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
document: str or dict or list
Document(s) (as dictionaries or DocumentTemplate objects) or id(s) of the document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the deletion, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client is not connected to a database
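Examples
--------
Illustrative sketch; "Person/Jane" is a hypothetical document id:
>>> client.delete_document("Person/Jane", commit_msg="Removing Jane")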
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
returns
-------
Bool
if the document exist
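Examples
--------
Illustrative sketch; "Person/Jane" is a hypothetical document id:
>>> client.has_doc("Person/Jane")
True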
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
returns
-------
dict
Dictionary containing information
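Examples
--------
Illustrative sketch; "Person" is a hypothetical class name:
>>> frame = client.get_class_frame("Person")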
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
commit_mg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
If the data version of the query result(s) should be obtained. If True, the method return the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client is not connected to a database
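Examples
--------
Illustrative sketch, branching off the current branch on a connected client:
>>> client.create_branch("dev")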
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client is not connected to a database
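Examples
--------
Illustrative sketch; "dev" is a hypothetical branch id:
>>> client.delete_branch("dev")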
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client is not connected to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self.author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client is not connected to a database
Examples
--------
>>> WOQLClient(server="http://localhost:6363").push(remote="origin", remote_branch="main", author="admin", message="commit message")
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
branch : str, optional
the branch for the rebase
rebase_source : str, optional
the source branch for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when need to go back after a soft reset).
soft: bool
Flag indicating if the reset if soft, that is referencing to a previous commit instead of resetting to a previous commit in the backend and wipping newer commits.
use_path : bool
Wheather or not the commit given is an id or path. Default using id and use_path is False.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client is not connected to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client is not connected to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id to be reset
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_dcoument(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json()
'{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id.startswith("terminusdb:///data"):
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
Do not connect when using the public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
# MASKED: clonedb function (lines 2107-2143)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client is not connected to a server
Returns
-------
dict or None if not found
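Examples
--------
Illustrative sketch; "example_db" is a hypothetical database id:
>>> client.get_database("example_db")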
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client is not connected to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[str]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client is not connected to a server
Returns
-------
list of str
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
description : str, optional
Optional description of the cloned database.
Raises
------
InterfaceError
if the client is not connected to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
| 2,107
| 2,143
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
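"""A patch object representing changes between documents, as produced by WOQLClient.diff.
Illustrative sketch of round-tripping a patch, assuming a single SwapValue operation:
>>> patch = Patch(json='{"name": {"@op": "SwapValue", "@before": "Jane", "@after": "Janine"}}')
>>> patch.before
{'name': 'Jane'}
>>> patch.update
{'name': 'Janine'}
"""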
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
URL of the server that this client connected.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
TerminusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Default to "main".
ref: str, None
Ref setting for the client. Default to None.
repo: str
Repo identifier of the database that this client is connected to. Default to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: str = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, default to be "admin"
db: optional, str
Name of the database connected
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, default to be "root"
user: optional, str
Name of the user, default to be "admin"
use_token: bool
Use token to connect. If both `jwt_token` and `api_token` are not provided (None), the environment variable TERMINUSDB_ACCESS_TOKEN will be used as the API token
jwt_token: optional, str
The Bearer JWT token to connect. Default to be None.
api_token: optional, str
The API token to connect. Default to be None.
branch: optional, str
Branch to be connected, default to be "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, default to be "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
"""Undo connect and close the connection.
The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
Commit history - Commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the following format:
{"commit_id":
{"author": "commit_author",
"message": "commit_message",
"timestamp: <datetime object of the timestamp>" }
}
Parameters
----------
max_history: int, optional
maximum number of commits to return, counting backwards from your current commit. Default is set to 500. It needs to be non-negative; if the input is 0 it will still return the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 34)},
 {'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'message': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
 {'commit': 'rciy1rfu5foj67ch00ow6f6nnjjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
 {'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
 {'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gavin@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
 {'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
 {'commit': '9egc4h0m36l5rbq1alr1fki6jbfukuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
------
NotImplementedError
Since TerminusDB currently does not support open transactions, this method is not applicable. To reset the commit head, use WOQLClient.reset instead.
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
Database identifier to set in the config.
team : str
Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/branch/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client is not connected to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force: bool
If True, force the deletion of the database.
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client is not connected to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client is not connected to a database
Returns
-------
str
"""
### TODO: make triples work again
raise InterfaceError("get_triples is temporarily not available in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client is not connected to a database
"""
### TODO: make triples work again
raise InterfaceError(
"update_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client is not connected to a database
"""
### TODO: make triples work again
raise InterfaceError(
"insert_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
Template for the document that is being retrived
graph_type : str, optional
Graph type, either "instance" or "schema".
as_list: bool
If the result returned as list rather than an iterator.
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method return the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
IRI id of the document being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
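Examples
--------
A minimal sketch, assuming a connected client and a hypothetical document id "Person/Jane":
>>> doc = client.get_document("Person/Jane")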
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
Specific type of the documents being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, defaults to 0
count: int or None
The maximum number of returned results; if None (default), all available results are returned.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
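Examples
--------
A minimal sketch, assuming a connected client and a hypothetical "Person" type:
>>> people = client.get_documents_by_type("Person", count=50, as_list=True)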
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting posiion of the returning results, default to be 0
count: int or None
The maximum number of returned result, if None (default) it will return all of the avalible result.
as_list: bool
If the result returned as list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method return the result and the version as a tuple.
kwargs:
Additional boolean flags for retriving. Currently avaliable: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
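Examples
--------
A minimal sketch, assuming a connected client:
>>> docs = client.get_all_documents(count=100, as_list=True)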
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_dcoument(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> Optional[list]:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "instance" or "schema".
full_replace : bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
list of ids of the inserted documents
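Examples
--------
A minimal sketch, assuming a connected client; "Person" is a hypothetical document type:
>>> client.insert_document({"@type": "Person", "name": "Jane"}, commit_msg="Adding Jane")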
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
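Examples
--------
A minimal sketch, assuming a connected client and a hypothetical existing document "Person/Jane":
>>> client.replace_document({"@id": "Person/Jane", "@type": "Person", "name": "Janine"}, commit_msg="Renaming Jane")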
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
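Examples
--------
A minimal sketch, assuming a connected client; inserts the hypothetical "Person/Jane" if it does not exist yet:
>>> client.update_document({"@id": "Person/Jane", "@type": "Person", "name": "Jane"}, commit_msg="Upserting Jane")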
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
document: str, dict or list
Document(s) (as dictionary or DocumentTemplate objects) or id(s) of document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
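Examples
--------
A minimal sketch, assuming a connected client and a hypothetical document id:
>>> client.delete_document("Person/Jane", commit_msg="Removing Jane")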
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
Returns
-------
bool
True if the document exists
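Examples
--------
A minimal sketch, assuming a connected client and a hypothetical document id:
>>> client.has_doc("Person/Jane")
True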
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
Returns
-------
dict
Dictionary containing information
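Examples
--------
A minimal sketch, assuming a connected client and a hypothetical "Person" class:
>>> frame = client.get_class_frame("Person")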
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
If the data version of the query result(s) should be obtained. If True, the method return the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
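Examples
--------
A minimal sketch, assuming a connected client; "dev" is a hypothetical branch name:
>>> client.create_branch("dev")
>>> client.create_branch("scratch", empty=True)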
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
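Examples
--------
A minimal sketch, assuming a connected client and a hypothetical existing branch "dev":
>>> client.delete_branch("dev")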
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").push(remote="origin", remote_branch = "main", author = "admin", message = "commit message")
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
branch : str, optional
the branch for the rebase
commit : str, optional
the commit id for the rebase
rebase_source : str, optional
the source branch or commit for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when you need to go back after a soft reset).
soft: bool
Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to a previous commit in the backend and wiping newer commits.
use_path : bool
Whether the commit given is an id or a path. Defaults to id (use_path is False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id of the new squash commit (can be passed to reset)
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_dcoument(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json()
'{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id.startswith("terminusdb:///data"):
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjunction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
Do not connect when using the public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
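Examples
--------
A minimal sketch, assuming a connected client and a hypothetical database id:
>>> client.get_database("some_db")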
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[str]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of str
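Examples
--------
A minimal sketch, assuming a connected client; the returned ids are hypothetical:
>>> client.list_databases()
['some_db', 'another_db']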
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
_generate_commit
|
Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
URL of the server that this client is connected to.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
TerminusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Default to "main".
ref: str, None
Ref setting for the client. Default to None.
repo: str
Repo identifier of the database that this client is connected to. Default to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: str = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, default to be "admin"
db: optional, str
Name of the database connected
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, default to be "root"
user: optional, str
Name of the user, default to be "admin"
use_token: bool
Use token to connect. If neither `jwt_token` nor `api_token` is provided (None), the ENV variable TERMINUSDB_ACCESS_TOKEN will be used as the API token
jwt_token: optional, str
The Bearer JWT token to connect. Default to be None.
api_token: optional, str
The API token to connect. Default to be None.
branch: optional, str
Branch to be connected, default to be "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, default to be "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection failed: {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
"""Undo connect and close the connection.
The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
By default also checks that a database is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
Commit history - Commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the following format:
{"commit_id":
{"author": "commit_author",
"message": "commit_message",
"timestamp": <datetime object of the timestamp>}
}
Parameters
----------
max_history: int, optional
maximum number of commits that will be returned, counting backwards from your current commit. Default is set to 500. It needs to be non-negative; if the input is 0 it will still return the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363"
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 34)},
{'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'message': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'rciy1rfu5foj67ch00ow6f6nnjjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gavin@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9egc4h0m36l5rbq1alr1fki6jbfukuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
Since TerminusDB currently does not support open transactions, this method is not applicable. To reset the commit head, use WOQLClient.reset
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
Database identifier to set in the config.
team : str
Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/branch/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force: bool
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
### TODO: make triples work again
raise InterfaceError("get_triples is temporarily not available in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples work again
raise InterfaceError(
"update_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples work again
raise InterfaceError(
"insert_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
Template for the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
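Examples
--------
An illustrative sketch (the database name and "Person" class are assumptions):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(db="some_db")
>>> matches = client.query_document({"@type": "Person", "name": "Jane"}, count=10)
>>> people = list(matches)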
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
IRI id of the document to retrieve
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
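Examples
--------
An illustrative sketch (the database name and document id are assumptions):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(db="some_db")
>>> doc = client.get_document("Person/Jane")
>>> doc, version = client.get_document("Person/Jane", get_data_version=True)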
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
Specific type of the documents to retrieve
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, defaults to 0
count: int or None
The maximum number of results returned; if None (default) it will return all of the available results.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
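Examples
--------
An illustrative sketch (the database name and "Person" class are assumptions):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(db="some_db")
>>> people = client.get_documents_by_type("Person", count=100, as_list=True)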
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting position of the returned results, defaults to 0
count: int or None
The maximum number of results returned; if None (default) it will return all of the available results.
as_list: bool
If True, the result is returned as a list rather than an iterator.
get_data_version: bool
If the version of the document(s) should be obtained. If True, the method returns the result and the version as a tuple.
kwargs:
Additional boolean flags for retrieving. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
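Examples
--------
An illustrative sketch (the database name is an assumption):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(db="some_db")
>>> docs = client.get_all_documents(count=50, as_list=True)
>>> docs, version = client.get_all_documents(as_list=True, get_data_version=True)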
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
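# Recursively walk a nested dict/list structure and yield every value
# stored under target_key (e.g. every "@ref" found in a document).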
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_dcoument(self, document, graph_type):
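# Convert document objects to plain dicts for the API and, for batches,
# check that every "@ref" has a matching "@capture" in the submission.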
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "inference", "instance" or "schema".
full_replace:: bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
list of ids of the inserted documents
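Examples
--------
An illustrative sketch (the database name and document contents are assumptions):
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(db="some_db")
>>> ids = client.insert_document([{"@type": "Person", "name": "Jane"}], commit_msg="Adding Jane")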
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
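Examples
--------
An illustrative sketch (assumes the document already exists in the database):
>>> client.replace_document({"@id": "Person/Jane", "@type": "Person", "name": "Janine"}, commit_msg="Update Jane")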
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding, 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
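Examples
--------
An illustrative sketch (the document contents are assumptions); the document is created if it does not exist:
>>> client.update_document({"@id": "Person/Jane", "@type": "Person", "name": "Jane"}, commit_msg="Upsert Jane")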
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
document: str or dict or list
Document(s) (as dictionaries or DocumentTemplate objects) or id(s) of the document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
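Examples
--------
An illustrative sketch (the document ids are assumptions):
>>> client.delete_document(["Person/Jane", "Person/Jim"], commit_msg="Removing Jane and Jim")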
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
Returns
-------
bool
True if the document exists, False otherwise.
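Examples
--------
An illustrative sketch (the document id is an assumption):
>>> client.has_doc("Person/Jane")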
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
Returns
-------
dict
Dictionary containing information
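Examples
--------
An illustrative sketch (the class name is an assumption):
>>> client.get_class_frame("Person")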
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
If the data version of the query result(s) should be obtained. If True, the method returns the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
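Examples
--------
An illustrative sketch (the branch names are assumptions):
>>> client.create_branch("dev")
>>> client.create_branch("scratch", empty=True)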
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self.author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").push(remote="origin", remote_branch = "main", author = "admin", message = "commit message"})
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
branch : str, optional
the branch for the rebase
commit : str, optional
the commit id for the rebase
rebase_source : str, optional
the source branch (or commit path) for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when you need to go back after a soft reset).
soft: bool
Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to a previous commit in the backend and wiping newer commits.
use_path : bool
Whether the commit given is an id or a path. Defaults to id (use_path is False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id of the new squash commit (can be passed to reset)
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_dcoument(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json = '{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id[:len("terminusdb:///data")] == "terminusdb:///data":
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
Do not connect when using the public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "ValueSwap", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
# MASKED: _generate_commit function (lines 2145-2174)
def _auth(self):
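# Credentials are resolved in priority order: HTTP basic auth (user/key)
# when not using tokens, then a JWT bearer token, then an explicit API
# token, and finally the TERMINUSDB_ACCESS_TOKEN environment variable.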
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[Dict]:
"""
Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
return base
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
| 2,145
| 2,174
|
"""woqlClient.py
WOQLClient is the Python public API for TerminusDB"""
import copy
import gzip
import json
import os
import urllib.parse as urlparse
import warnings
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import requests
from ..__version__ import __version__
from ..errors import DatabaseError, InterfaceError
from ..woql_utils import (
_clean_dict,
_dt_dict,
_dt_list,
_finish_response,
_result2stream,
)
from ..woqlquery.woql_query import WOQLQuery
# WOQL client object
# license Apache Version 2
# summary Python module for accessing the Terminus DB API
class JWTAuth(requests.auth.AuthBase):
"""Class for JWT Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["Authorization"] = f"Bearer {self._token}"
return r
class APITokenAuth(requests.auth.AuthBase):
"""Class for API Token Authentication in requests"""
def __init__(self, token):
self._token = token
def __call__(self, r):
r.headers["API_TOKEN"] = f"{self._token}"
return r
class ResourceType(Enum):
"""Enum for the different TerminusDB resources"""
DB = 1
META = 2
REPO = 3
COMMITS = 4
REF = 5
BRANCH = 6
class Patch:
def __init__(self, json=None):
if json:
self.from_json(json)
else:
self.content = None
@property
def update(self):
def swap_value(swap_item):
result_dict = {}
for key, item in swap_item.items():
if isinstance(item, dict):
operation = item.get("@op")
if operation is not None and operation == "SwapValue":
result_dict[key] = item.get("@after")
elif operation is None:
result_dict[key] = swap_value(item)
return result_dict
return swap_value(self.content)
@update.setter
def update(self, value):
raise Exception("Cannot set update for patch")
@update.deleter
def update(self):
raise Exception("Cannot delete update for patch")
@property
def before(self):
def extract_before(extract_item):
before_dict = {}
for key, item in extract_item.items():
if isinstance(item, dict):
value = item.get("@before")
if value is not None:
before_dict[key] = value
else:
before_dict[key] = extract_before(item)
else:
before_dict[key] = item
return before_dict
return extract_before(self.content)
@before.setter
def before(self, value):
raise Exception("Cannot set before for patch")
@before.deleter
def before(self):
raise Exception("Cannot delete before for patch")
def from_json(self, json_str):
content = json.loads(json_str)
if isinstance(content, dict):
self.content = _dt_dict(content)
else:
self.content = _dt_list(content)
def to_json(self):
return json.dumps(_clean_dict(self.content))
def copy(self):
return copy.deepcopy(self)
class WOQLClient:
"""Client for querying a TerminusDB server using WOQL queries.
Attributes
----------
server_url: str
URL of the server that this client connected.
api: str
API endpoint for this client.
team: str
Team that this client is using. "admin" for local dbs.
db: str
Database that this client is connected to.
user: str
TerminusDB user that this client is using. "admin" for local dbs.
branch: str
Branch of the database that this client is connected to. Default to "main".
ref: str, None
Ref setting for the client. Default to None.
repo: str
Repo identifier of the database that this client is connected to. Default to "local".
"""
def __init__(self, server_url: str, **kwargs) -> None:
r"""The WOQLClient constructor.
Parameters
----------
server_url : str
URL of the server that this client will connect to.
\**kwargs
Extra configuration options
"""
self.server_url = server_url.strip("/")
self.api = f"{self.server_url}/api"
self._connected = False
# properties with get/setters
self._team = None
self._db = None
self._user = None
self._branch = None
self._ref = None
self._repo = None
@property
def team(self):
if isinstance(self._team, str):
return urlparse.unquote(self._team)
else:
return self._team
@team.setter
def team(self, value):
if isinstance(value, str):
self._team = urlparse.quote(value)
else:
self._team = value
@property
def db(self):
if isinstance(self._db, str):
return urlparse.unquote(self._db)
else:
return self._db
@db.setter
def db(self, value):
if isinstance(value, str):
self._db = urlparse.quote(value)
else:
self._db = value
@property
def user(self):
if isinstance(self._user, str):
return urlparse.unquote(self._user)
else:
return self._user
@user.setter
def user(self, value):
if isinstance(value, str):
self._user = urlparse.quote(value)
else:
self._user = value
@property
def branch(self):
if isinstance(self._branch, str):
return urlparse.unquote(self._branch)
else:
return self._branch
@branch.setter
def branch(self, value):
if isinstance(value, str):
self._branch = urlparse.quote(value)
else:
self._branch = value
@property
def repo(self):
if isinstance(self._repo, str):
return urlparse.unquote(self._repo)
else:
return self._repo
@repo.setter
def repo(self, value):
if isinstance(value, str):
self._repo = urlparse.quote(value)
else:
self._repo = value
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, value):
if isinstance(value, str):
value = value.lower()
if value in ["local", "remote", None]:
self._ref = value
else:
raise ValueError("ref can only be 'local' or 'remote'")
def connect(
self,
team: str = "admin",
db: Optional[str] = None,
remote_auth: str = None,
use_token: bool = False,
jwt_token: Optional[str] = None,
api_token: Optional[str] = None,
key: str = "root",
user: str = "admin",
branch: str = "main",
ref: Optional[str] = None,
repo: str = "local",
**kwargs,
) -> None:
r"""Connect to a Terminus server at the given URI with an API key.
Stores the connection settings and necessary meta-data for the connected server. You need to connect before most database operations.
Parameters
----------
team: str
Name of the team, default to be "admin"
db: optional, str
Name of the database connected
remote_auth: optional, str
Remote Auth setting
key: optional, str
API key for connecting, default to be "root"
user: optional, str
Name of the user, default to be "admin"
use_token: bool
Use token to connect. If both `jwt_token` and `api_token` are not provided (None), then it will use the environment variable TERMINUSDB_ACCESS_TOKEN to connect as the API token
jwt_token: optional, str
The Bearer JWT token to connect. Default to be None.
api_token: optional, str
The API token to connect. Default to be None.
branch: optional, str
Branch to be connected, default to be "main"
ref: optional, str
Ref setting
repo: optional, str
Local or remote repo, default to be "local"
\**kwargs
Extra configuration options.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.connect(key="root", team="admin", user="admin", db="example_db")
"""
self.team = team
self.db = db
self._remote_auth = remote_auth
self._key = key
self.user = user
self._use_token = use_token
self._jwt_token = jwt_token
self._api_token = api_token
self.branch = branch
self.ref = ref
self.repo = repo
self._connected = True
try:
self._db_info = json.loads(
_finish_response(
requests.get(
self.api + "/info",
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
auth=self._auth(),
)
)
)
except Exception as error:
raise InterfaceError(
f"Cannot connect to server, please make sure TerminusDB is running at {self.server_url} and the authentication details are correct. Details: {str(error)}"
) from None
if self.db is not None:
try:
_finish_response(
requests.head(
self._db_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}"
},
params={"exists": "true"},
auth=self._auth(),
)
)
except DatabaseError:
raise InterfaceError(f"Connection fail, {self.db} does not exist.")
self._author = self.user
def close(self) -> None:
"""Undo connect and close the connection.
The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection, unless connect is called again."""
self._connected = False
def _check_connection(self, check_db=True) -> None:
"""Raise connection InterfaceError if not connected
Defaults to check if a db is connected"""
if not self._connected:
raise InterfaceError("Client is not connected to a TerminusDB server.")
if check_db and self.db is None:
raise InterfaceError(
"No database is connected. Please either connect to a database or create a new database."
)
def get_commit_history(self, max_history: int = 500) -> list:
"""Get the whole commit history.
Commit history - Commit id, author of the commit, commit message and the commit time, in the current branch from the current commit, ordered backwards in time, will be returned in a dictionary in the following format:
{"commit_id":
{"author": "commit_author",
"message": "commit_message",
"timestamp: <datetime object of the timestamp>" }
}
Parameters
----------
max_history: int, optional
maximum number of commits to return, counting backwards from your current commit. Default is set to 500. It needs to be non-negative; if the input is 0 it will still give the last commit.
Example
-------
>>> from terminusdb_client import WOQLClient
>>> client = WOQLClient("https://127.0.0.1:6363"
>>> client.connect(db="bank_balance_example")
>>> client.get_commit_history()
[{'commit': 's90wike9v5xibmrb661emxjs8k7ynwc', 'author': 'admin', 'message': 'Adding Jane', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 34)},
{'commit': '1qhge8qlodajx93ovj67kvkrkxsw3pg', 'author': 'gavin@terminusdb.com', 'message': 'Adding Jim', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'rciy1rfu5foj67ch00ow6f6nnjjxe3i', 'author': 'gavin@terminusdb.com', 'message': 'Update mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': 'n4d86u8juzx852r2ekrega5hl838ovh', 'author': 'gavin@terminusdb.com', 'message': 'Add mike', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '1vk2i8k8xce26p9jpi4zmq1h5vdqyuj', 'author': 'gavin@terminusdb.com', 'message': 'Label for balance was wrong', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9si4na9zv2qol9b189y92fia7ac3hbg', 'author': 'gavin@terminusdb.com', 'message': 'Adding bank account object to schema', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)},
{'commit': '9egc4h0m36l5rbq1alr1fki6jbfukuv', 'author': 'TerminusDB', 'message': 'internal system operation', 'timestamp': datetime.datetime(2020, 9, 3, 15, 29, 33)}]
Returns
-------
list
"""
if max_history < 0:
raise ValueError("max_history needs to be non-negative.")
if max_history > 1:
limit_history = max_history - 1
else:
limit_history = 1
woql_query = (
WOQLQuery()
.using("_commits")
.limit(limit_history)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.path("v:commit", "parent*", "v:target_commit")
.triple("v:target_commit", "identifier", "v:cid")
.triple("v:target_commit", "author", "v:author")
.triple("v:target_commit", "message", "v:message")
.triple("v:target_commit", "timestamp", "v:timestamp")
)
result = self.query(woql_query).get("bindings")
if not result:
return result
else:
result_list = []
for result_item in result:
result_list.append(
{
"commit": result_item["cid"]["@value"],
"author": result_item["author"]["@value"],
"message": result_item["message"]["@value"],
"timestamp": datetime.fromtimestamp(
int(result_item["timestamp"]["@value"])
),
}
)
return result_list
def _get_current_commit(self):
woql_query = (
WOQLQuery()
.using("_commits")
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:commit", "identifier", "v:cid")
)
result = self.query(woql_query)
if not result:
return None
current_commit = result.get("bindings")[0].get("cid").get("@value")
return current_commit
def _get_target_commit(self, step):
woql_query = (
WOQLQuery()
.using("_commits")
.path(
"v:commit",
f"parent{{{step},{step}}}",
"v:target_commit",
)
.triple("v:branch", "name", WOQLQuery().string(self.branch))
.triple("v:branch", "head", "v:commit")
.triple("v:target_commit", "identifier", "v:cid")
)
result = self.query(woql_query)
target_commit = result.get("bindings")[0].get("cid").get("@value")
return target_commit
def get_all_branches(self, get_data_version=False):
"""Get all the branches available in the database."""
self._check_connection()
api_url = self._documents_url().split("/")
api_url = api_url[:-2]
api_url = "/".join(api_url) + "/_commits"
result = requests.get(
api_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params={"type": "Branch"},
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return list(_result2stream(result)), version
return list(_result2stream(_finish_response(result)))
def rollback(self, steps=1) -> None:
"""Curently not implementated. Please check back later.
Raises
----------
NotImplementedError
Since TerminusDB currently does not support open transactions, this method is not applicable. To reset the commit head, use WOQLClient.reset
"""
raise NotImplementedError(
"Open transactions are currently not supported. To reset commit head, check WOQLClient.reset"
)
def copy(self) -> "WOQLClient":
"""Create a deep copy of this client.
Returns
-------
WOQLClient
The copied client instance.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> clone = client.copy()
>>> assert client is not clone
"""
return copy.deepcopy(self)
def set_db(self, dbid: str, team: Optional[str] = None) -> str:
"""Set the connection to another database. This will reset the connection.
Parameters
----------
dbid : str
Database identifier to set in the config.
team : str
Team identifier to set in the config. If not passed in, it will use the current one.
Returns
-------
str
The current database identifier.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.set_db("database1")
'database1'
"""
self._check_connection(check_db=False)
if team is None:
team = self.team
return self.connect(
team=team,
db=dbid,
remote_auth=self._remote_auth,
key=self._key,
user=self.user,
branch=self.branch,
ref=self.ref,
repo=self.repo,
)
def resource(self, ttype: ResourceType, val: Optional[str] = None) -> str:
"""Create a resource identifier string based on the current config.
Parameters
----------
ttype : ResourceType
Type of resource.
val : str, optional
Branch or commit identifier.
Returns
-------
str
The constructed resource string.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363")
>>> client.resource(ResourceType.DB)
'<team>/<db>/'
>>> client.resource(ResourceType.META)
'<team>/<db>/_meta'
>>> client.resource(ResourceType.COMMITS)
'<team>/<db>/<repo>/_commits'
>>> client.resource(ResourceType.REF, "<reference>")
'<team>/<db>/<repo>/commit/<reference>'
>>> client.resource(ResourceType.BRANCH, "<branch>")
'<team>/<db>/<repo>/branch/<branch>'
"""
base = self.team + "/" + self.db + "/"
ref_value = val if val else self.ref
branch_value = val if val else self.branch
urls = {
ResourceType.DB: base,
ResourceType.META: f"{base}_meta",
ResourceType.REPO: f"{base}{self.repo}/_meta",
ResourceType.COMMITS: f"{base}{self.repo}/_commits",
ResourceType.REF: f"{base}{self.repo}/commit/{ref_value}",
ResourceType.BRANCH: f"{base}{self.repo}/branch/{branch_value}",
}
return urls[ttype]
def _get_prefixes(self):
"""Get the prefixes for a given database"""
self._check_connection()
result = requests.get(
self._db_base("prefixes"),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def create_database(
self,
dbid: str,
team: Optional[str] = None,
label: Optional[str] = None,
description: Optional[str] = None,
prefixes: Optional[dict] = None,
include_schema: bool = True,
) -> None:
"""Create a TerminusDB database by posting
a terminus:Database document to the Terminus Server.
Parameters
----------
dbid : str
Unique identifier of the database.
team : str, optional
ID of the Team in which to create the DB (defaults to 'admin')
label : str, optional
Database name.
description : str, optional
Database description.
prefixes : dict, optional
Optional dict containing ``"@base"`` and ``"@schema"`` keys.
@base (str)
IRI to use when ``doc:`` prefixes are expanded. Defaults to ``terminusdb:///data``.
@schema (str)
IRI to use when ``scm:`` prefixes are expanded. Defaults to ``terminusdb:///schema``.
include_schema : bool
If ``True``, a main schema graph will be created, otherwise only a main instance graph will be created.
Raises
------
InterfaceError
if the client does not connect to a server
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.create_database("someDB", "admin", "Database Label", "My Description")
"""
self._check_connection(check_db=False)
details: Dict[str, Any] = {}
if label:
details["label"] = label
else:
details["label"] = dbid
if description:
details["comment"] = description
else:
details["comment"] = ""
if include_schema:
details["schema"] = True
if prefixes:
details["prefixes"] = prefixes
if team is None:
team = self.team
self.team = team
self._connected = True
self.db = dbid
_finish_response(
requests.post(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=details,
auth=self._auth(),
)
)
def delete_database(
self,
dbid: Optional[str] = None,
team: Optional[str] = None,
force: bool = False,
) -> None:
"""Delete a TerminusDB database.
If ``team`` is provided, then the team in the config will be updated
and the new value will be used in future requests to the server.
Parameters
----------
dbid : str
ID of the database to delete
team : str, optional
the team in which the database resides (defaults to "admin")
force : bool
whether to force the deletion of the database
Raises
------
UserWarning
If the value of dbid is None.
InterfaceError
if the client does not connect to a server.
Examples
-------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.delete_database("<database>", "<team>")
"""
self._check_connection(check_db=False)
if dbid is None:
raise UserWarning(
f"You are currently using the database: {self.team}/{self.db}. If you want to delete it, please do 'delete_database({self.db},{self.team})' instead."
)
self.db = dbid
if team is None:
warnings.warn(
f"Delete Database Warning: You have not specify the team, assuming {self.team}/{self.db}"
)
else:
self.team = team
payload = {"force": force}
_finish_response(
requests.delete(
self._db_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
params=payload,
)
)
self.db = None
def _validate_graph_type(self, graph_type):
if graph_type not in ["instance", "schema"]:
raise ValueError("graph_type can only be 'instance' or 'schema'")
def get_triples(self, graph_type: str) -> str:
"""Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str
"""
### TODO: make triples work again
raise InterfaceError("get_triples is temporarily not available in this version")
self._check_connection()
self._validate_graph_type(graph_type)
result = requests.get(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples work again
raise InterfaceError(
    "update_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.post(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def insert_triples(
self, graph_type: str, turtle, commit_msg: Optional[str] = None
) -> None:
"""Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a database
"""
### TODO: make triples work again
raise InterfaceError(
    "insert_triples is temporarily not available in this version"
)
self._check_connection()
self._validate_graph_type(graph_type)
params = {"commit_info": self._generate_commit(commit_msg)}
params["turtle"] = turtle
result = requests.put(
self._triples_url(graph_type),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=params,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def query_document(
self,
document_template: dict,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
Template for the document that is being retrieved
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
    The starting position of the returned results, defaults to 0
count: int or None
    The maximum number of results to return; if None (default), all matching results are returned.
as_list: bool
    If True, the result is returned as a list rather than an iterator.
get_data_version: bool
    If True, the data version of the document(s) is obtained and the method returns the result and the version as a tuple.
kwargs:
    Additional boolean flags. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
Iterable
"""
self._validate_graph_type(graph_type)
self._check_connection()
payload = {"query": document_template, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
add_args = ["prefixed", "minimized", "unfold"]
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.post(
self._documents_url(),
headers={
"user-agent": f"terminusdb-client-python/{__version__}",
"X-HTTP-Method-Override": "GET",
},
json=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
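    # A usage sketch for query_document (hypothetical documents and connection
    # details, not part of the original docstring): the template lists the
    # properties a matching document must have.
    #
    #   client = WOQLClient("https://127.0.0.1:6363/")
    #   client.connect(user="admin", key="root", team="admin", db="some_db")
    #   janes = client.query_document(
    #       {"@type": "Person", "name": "Jane"}, count=10, as_list=True
    #   )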
def get_document(
self,
iri_id: str,
graph_type: str = "instance",
get_data_version: bool = False,
**kwargs,
) -> dict:
"""Retrieves the document of the iri_id
Parameters
----------
iri_id : str
IRI id of the document to retrieve
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
    If True, the data version of the document is obtained and the method returns the result and the version as a tuple.
kwargs:
    Additional boolean flags for retrieval. Currently available: "prefixed", "minimized", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "minimized", "unfold"]
self._check_connection()
payload = {"id": iri_id, "graph_type": graph_type}
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return json.loads(result), version
return json.loads(_finish_response(result))
def get_documents_by_type(
self,
doc_type: str,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version=False,
**kwargs,
) -> Union[Iterable, list]:
"""Retrieves the documents by type
Parameters
----------
doc_type : str
Specific type of the documents to retrieve
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
    The starting position of the returned results, defaults to 0
count: int or None
    The maximum number of results to return; if None (default), all available results are returned.
as_list: bool
    If True, the result is returned as a list rather than an iterator.
get_data_version: bool
    If True, the data version of the document(s) is obtained and the method returns the result and the version as a tuple.
kwargs:
    Additional boolean flags for retrieval. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"type": doc_type, "graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
def get_all_documents(
self,
graph_type: str = "instance",
skip: int = 0,
count: Optional[int] = None,
as_list: bool = False,
get_data_version: bool = False,
**kwargs,
) -> Union[Iterable, list, tuple]:
"""Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
    The starting position of the returned results, defaults to 0
count: int or None
    The maximum number of results to return; if None (default), all available results are returned.
as_list: bool
    If True, the result is returned as a list rather than an iterator.
get_data_version: bool
    If True, the data version of the document(s) is obtained and the method returns the result and the version as a tuple.
kwargs:
    Additional boolean flags for retrieval. Currently available: "prefixed", "unfold"
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
iterable
Stream of dictionaries
"""
self._validate_graph_type(graph_type)
add_args = ["prefixed", "unfold"]
self._check_connection()
payload = {"graph_type": graph_type}
payload["skip"] = skip
if count is not None:
payload["count"] = count
for the_arg in add_args:
if the_arg in kwargs:
payload[the_arg] = kwargs[the_arg]
result = requests.get(
self._documents_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=payload,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
return_obj = _result2stream(result)
if as_list:
return list(return_obj), version
else:
return return_obj, version
return_obj = _result2stream(_finish_response(result))
if as_list:
return list(return_obj)
else:
return return_obj
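    # A paging sketch (hypothetical page size; assumes an established
    # connection): `skip`/`count` page through the documents, and
    # `get_data_version=True` also returns the data version, which can later
    # be passed as `last_data_version` to guard updates against races.
    #
    #   page, version = client.get_all_documents(
    #       skip=0, count=100, as_list=True, get_data_version=True
    #   )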
def get_existing_classes(self):
"""Get all the existing classes (only ids) in a database."""
all_existing_obj = self.get_all_documents(graph_type="schema")
all_existing_class = {}
for item in all_existing_obj:
if item.get("@id"):
all_existing_class[item["@id"]] = item
return all_existing_class
def _conv_to_dict(self, obj):
if isinstance(obj, dict):
return _clean_dict(obj)
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif hasattr(obj, "_to_dict"):
if hasattr(obj, "_isinstance") and obj._isinstance:
if hasattr(obj.__class__, "_subdocument"):
raise ValueError("Subdocument cannot be added directly")
return obj._obj_to_dict()
else:
return obj._to_dict()
else:
raise ValueError("Object cannot convert to dictionary")
def _ref_extract(self, target_key, search_item):
if hasattr(search_item, "items"):
for key, value in search_item.items():
if key == target_key:
yield value
if isinstance(value, dict):
yield from self._ref_extract(target_key, value)
elif isinstance(value, list):
for item in value:
yield from self._ref_extract(target_key, item)
def _convert_dcoument(self, document, graph_type):
if isinstance(document, list):
new_doc = []
captured = []
referenced = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
item_capture = item_dict.get("@capture")
if item_capture:
captured.append(item_capture)
referenced += list(self._ref_extract("@ref", item_dict))
referenced = list(set(referenced))
for item in referenced:
if item not in captured:
raise ValueError(
f"{item} is referenced but not captured. Seems you forgot to submit one or more object(s)."
)
else:
if hasattr(document, "to_dict") and graph_type != "schema":
raise InterfaceError(
"Inserting WOQLSchema object into non-schema graph."
)
new_doc = self._conv_to_dict(document)
if isinstance(new_doc, dict) and list(self._ref_extract("@ref", new_doc)):
raise ValueError(
"There are uncaptured references. Seems you forgot to submit one or more object(s)."
)
return new_doc
def insert_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
full_replace: bool = False,
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "inference", "instance" or "schema".
full_replace : bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object as the first element in the list of documents if using this option.
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
list
list of ids of the inserted documents
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
if full_replace:
params["full_replace"] = "true"
else:
params["full_replace"] = "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
if len(new_doc) == 0:
return
elif not isinstance(new_doc, list):
new_doc = [new_doc]
if full_replace:
if new_doc[0].get("@type") != "@context":
raise ValueError(
"The first item in docuemnt need to be dictionary representing the context object."
)
else:
if new_doc[0].get("@type") == "@context":
warnings.warn(
"To replace context, need to use `full_replace` or `replace_document`, skipping context object now."
)
new_doc.pop(0)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.post(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
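    # An insertion sketch (hypothetical document; assumes an established
    # connection): payloads above the `compress` threshold (default 1024
    # bytes) are gzip-compressed before sending, and the returned list holds
    # the ids of the inserted documents.
    #
    #   ids = client.insert_document(
    #       {"@type": "Person", "name": "Jane"}, commit_msg="Add Jane"
    #   )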
def replace_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
create: bool = False,
) -> None:
"""Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
create : bool
Create the document if it does not yet exist.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._validate_graph_type(graph_type)
self._check_connection()
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
params["create"] = "true" if create else "false"
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
new_doc = self._convert_dcoument(document, graph_type)
json_string = json.dumps(new_doc).encode("utf-8")
if compress != "never" and len(json_string) > compress:
headers.update(
{"Content-Encoding": "gzip", "Content-Type": "application/json"}
)
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
data=gzip.compress(json_string),
auth=self._auth(),
)
else:
result = requests.put(
self._documents_url(),
headers=headers,
params=params,
json=new_doc,
auth=self._auth(),
)
result = json.loads(_finish_response(result))
if isinstance(document, list):
for idx, item in enumerate(document):
if hasattr(item, "_obj_to_dict") and not hasattr(item, "_backend_id"):
item._backend_id = result[idx][len("terminusdb:///data/") :]
return result
def update_document(
self,
document: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
compress: Union[str, int] = 1024,
) -> None:
"""Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
compress : str or int
If it is an integer, data larger than this size (in bytes) will be compressed with gzip in the request (assuming UTF-8 encoding; 0 = always compress). If it is `never`, the data will never be compressed.
Raises
------
InterfaceError
if the client does not connect to a database
"""
self.replace_document(
document, graph_type, commit_msg, last_data_version, compress, True
)
def delete_document(
self,
document: Union[str, list, dict, Iterable],
graph_type: str = "instance",
commit_msg: Optional[str] = None,
last_data_version: Optional[str] = None,
) -> None:
"""Delete the specified document(s)
Parameters
----------
document: str, dict, or list
    Document(s) (as dict or DocumentTemplate objects) or id(s) of the document(s) to be deleted.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._validate_graph_type(graph_type)
self._check_connection()
doc_id = []
if not isinstance(document, (str, list, dict)) and hasattr(
document, "__iter__"
):
document = list(document)
if not isinstance(document, list):
document = [document]
for doc in document:
if hasattr(doc, "_obj_to_dict"):
doc = doc._obj_to_dict()
if isinstance(doc, dict) and doc.get("@id"):
doc_id.append(doc.get("@id"))
elif isinstance(doc, str):
doc_id.append(doc)
params = self._generate_commit(commit_msg)
params["graph_type"] = graph_type
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
_finish_response(
requests.delete(
self._documents_url(),
headers=headers,
params=params,
json=doc_id,
auth=self._auth(),
)
)
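    # A deletion sketch (hypothetical id; assumes an established connection):
    # documents can be deleted by id, by dict carrying an "@id", or by
    # DocumentTemplate object.
    #
    #   client.delete_document("Person/Jane", commit_msg="Remove Jane")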
def has_doc(self, doc_id: str, graph_type: str = "instance") -> bool:
"""Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
Returns
-------
bool
    True if the document exists
"""
self._validate_graph_type(graph_type)
self._check_connection()
all_existing_obj = self.get_all_documents(graph_type=graph_type)
all_existing_id = list(map(lambda x: x.get("@id"), all_existing_obj))
return doc_id in all_existing_id
def get_class_frame(self, class_name):
"""Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
Returns
-------
dict
Dictionary containing information
"""
self._check_connection()
opts = {"type": class_name}
result = requests.get(
self._class_frame_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
params=opts,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def commit(self):
"""Not implementated: open transactions currently not suportted. Please check back later."""
def query(
self,
woql_query: Union[dict, WOQLQuery],
commit_msg: Optional[str] = None,
get_data_version: bool = False,
last_data_version: Optional[str] = None,
# file_dict: Optional[dict] = None,
) -> Union[dict, str]:
"""Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
commit_msg : str
A message that will be written to the commit log to describe the change
get_data_version: bool
If True, the data version of the query result(s) is obtained and the method returns the result and the version as a tuple.
last_data_version : str
Last version before the update, used to check if the document has been changed unknowingly
file_dict: **deprecated**
File dictionary to be associated with post name => filename, for multipart POST
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").query(woql, "updating graph")
Returns
-------
dict
"""
self._check_connection()
query_obj = {"commit_info": self._generate_commit(commit_msg)}
if isinstance(woql_query, WOQLQuery):
request_woql_query = woql_query.to_dict()
else:
request_woql_query = woql_query
query_obj["query"] = request_woql_query
headers = {"user-agent": f"terminusdb-client-python/{__version__}"}
if last_data_version is not None:
headers["TerminusDB-Data-Version"] = last_data_version
result = requests.post(
self._query_url(),
headers=headers,
json=query_obj,
auth=self._auth(),
)
if get_data_version:
result, version = _finish_response(result, get_data_version)
result = json.loads(result)
else:
result = json.loads(_finish_response(result))
if result.get("inserts") or result.get("deletes"):
return "Commit successfully made."
elif get_data_version:
return result, version
else:
return result
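    # A query sketch (hypothetical query; assumes the WOQLQuery builder from
    # this package and an established connection): read-only queries return
    # the bindings, while queries that insert or delete data return
    # "Commit successfully made."
    #
    #   woql = WOQLQuery().triple("v:Subject", "v:Predicate", "v:Object")
    #   result = client.query(woql, "listing all triples")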
def create_branch(self, new_branch_id: str, empty: bool = False) -> None:
"""Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._check_connection()
if empty:
source = {}
elif self.ref:
source = {"origin": f"{self.team}/{self.db}/{self.repo}/commit/{self.ref}"}
else:
source = {
"origin": f"{self.team}/{self.db}/{self.repo}/branch/{self.branch}"
}
_finish_response(
requests.post(
self._branch_url(new_branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=source,
auth=self._auth(),
)
)
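    # A branching sketch (hypothetical branch ids; assumes an established
    # connection): the new branch starts from the current commit if one is
    # set, otherwise from the current branch head.
    #
    #   client.create_branch("dev")                  # branch off current head
    #   client.create_branch("scratch", empty=True)  # no starting commit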
def delete_branch(self, branch_id: str) -> None:
"""Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database
"""
self._check_connection()
_finish_response(
requests.delete(
self._branch_url(branch_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def pull(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
remote branch to pull from, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.pull()
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self.author
if message is None:
message = (
f"Pulling from {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._pull_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def fetch(self, remote_id: str) -> dict:
"""Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database"""
self._check_connection()
result = requests.post(
self._fetch_url(remote_id),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def push(
self,
remote: str = "origin",
remote_branch: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
remote branch to push to, defaults to your current branch
message: str, optional
optional commit message
author: str, optional
option to override the author of the operation
Raises
------
InterfaceError
if the client does not connect to a database
Examples
-------
>>> WOQLClient(server="http://localhost:6363").push(remote="origin", remote_branch = "main", author = "admin", message = "commit message"})
Returns
-------
dict
"""
self._check_connection()
if remote_branch is None:
remote_branch = self.branch
if author is None:
author = self._author
if message is None:
message = (
f"Pushing to {remote}/{remote_branch} by Python client {__version__}"
)
rc_args = {
"remote": remote,
"remote_branch": remote_branch,
"author": author,
"message": message,
}
result = requests.post(
self._push_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def rebase(
self,
branch: Optional[str] = None,
commit: Optional[str] = None,
rebase_source: Optional[str] = None,
message: Optional[str] = None,
author: Optional[str] = None,
) -> dict:
"""Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
branch : str, optional
    the branch for the rebase
commit : str, optional
    the commit id for the rebase
rebase_source : str, optional
the source branch for the rebase
message : str, optional
the commit message
author : str, optional
the commit author
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
dict
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.rebase("the_branch")
"""
self._check_connection()
if branch is not None and commit is None:
rebase_source = "/".join([self.team, self.db, self.repo, "branch", branch])
elif branch is None and commit is not None:
rebase_source = "/".join([self.team, self.db, self.repo, "commit", commit])
elif branch is not None or commit is not None:
raise RuntimeError("Cannot specify both branch and commit.")
elif rebase_source is None:
raise RuntimeError(
"Need to specify one of 'branch', 'commit' or the 'rebase_source'"
)
if author is None:
author = self._author
if message is None:
message = f"Rebase from {rebase_source} by Python client {__version__}"
rc_args = {"rebase_from": rebase_source, "author": author, "message": message}
result = requests.post(
self._rebase_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
return json.loads(_finish_response(result))
def reset(
self, commit: Optional[str] = None, soft: bool = False, use_path: bool = False
) -> None:
"""Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
commit: string
Commit id or path to the commit (if use_path is True), for instance '234980523ffaf93' or 'admin/database/local/commit/234980523ffaf93'. If not provided, it will reset to the newest commit (useful when you need to go back after a soft reset).
soft: bool
Flag indicating if the reset is soft, that is, referencing a previous commit instead of resetting to it in the backend and wiping newer commits.
use_path : bool
Whether the commit given is an id or a path. Defaults to id (use_path=False).
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.reset('234980523ffaf93')
>>> client.reset('admin/database/local/commit/234980523ffaf93', use_path=True)
"""
self._check_connection()
if soft:
if use_path:
self._ref = commit.split("/")[-1]
else:
self._ref = commit
return None
else:
self._ref = None
if commit is None:
return None
if use_path:
commit_path = commit
else:
commit_path = f"{self.team}/{self.db}/{self.repo}/commit/{commit}"
_finish_response(
requests.post(
self._reset_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_descriptor": commit_path},
auth=self._auth(),
)
)
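    # A reset sketch (hypothetical commit id; assumes an established
    # connection): a soft reset only repoints the client locally, while a
    # hard reset rewinds the branch on the server, discarding newer commits.
    #
    #   client.reset("234980523ffaf93", soft=True)  # inspect an old commit
    #   client.reset()                              # back to the newest commit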
def optimize(self, path: str) -> None:
"""Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.optimize('admin/database') # optimise database branch (here main)
>>> client.optimize('admin/database/_meta') # optimise the repository graph (actually creates a squashed flat layer)
>>> client.optimize('admin/database/local/_commits') # commit graph is optimised
"""
self._check_connection()
_finish_response(
requests.post(
self._optimize_url(path),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
)
def squash(
self,
message: Optional[str] = None,
author: Optional[str] = None,
reset: bool = False,
) -> str:
"""Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
reset : bool
Perform reset after squash
Returns
-------
str
commit id to be reset
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> client.squash('This is a squash commit message!')
"""
self._check_connection()
result = requests.post(
self._squash_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json={"commit_info": self._generate_commit(message, author)},
auth=self._auth(),
)
# API response:
# {'@type' : 'api:SquashResponse',
# 'api:commit' : Commit,
# 'api:old_commit' : Old_Commit,
# 'api:status' : "api:success"}
commit_id = json.loads(_finish_response(result)).get("api:commit")
if reset:
self.reset(commit_id)
return commit_id
def _convert_diff_dcoument(self, document):
if isinstance(document, list):
new_doc = []
for item in document:
item_dict = self._conv_to_dict(item)
new_doc.append(item_dict)
else:
new_doc = self._conv_to_dict(document)
return new_doc
def diff(
self,
before: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
after: Union[
str,
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
document_id: Union[str, None] = None
):
"""Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "Person/Jane", "@type" : "Person", "name" : "Jane"}, { "@id" : "Person/Jane", "@type" : "Person", "name" : "Janine"})
>>> result.to_json = '{ "name" : { "@op" : "SwapValue", "@before" : "Jane", "@after": "Janine" }}'"""
request_dict = {}
for key, item in {"before": before, "after": after}.items():
if isinstance(item, str):
request_dict[f"{key}_data_version"] = item
else:
request_dict[key] = self._convert_diff_dcoument(item)
if document_id is not None:
if "before_data_version" in request_dict:
if document_id[:len("terminusdb:///data")] == "terminusdb:///data":
request_dict["document_id"] = document_id
else:
raise ValueError(f"Valid document id starts with `terminusdb:///data`, but got {document_id}")
else:
raise ValueError("`document_id` can only be used in conjusction with a data version or commit ID as `before`, not a document object")
if self._connected:
result = _finish_response(
requests.post(
self._diff_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return Patch(json=result)
def patch(
self,
before: Union[
dict,
List[dict],
"WOQLSchema", # noqa:F821
"DocumentTemplate", # noqa:F821
List["DocumentTemplate"], # noqa:F821
],
patch: Patch,
):
"""Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
No database connection is needed when using the public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> patch_obj = Patch(json='{"name" : { "@op" : "ValueSwap", "@before" : "Jane", "@after": "Janine" }}')
>>> result = client.patch({ "@id" : "Person/Jane", "@type" : Person", "name" : "Jane"}, patch_obj)
>>> print(result)
'{ "@id" : "Person/Jane", "@type" : Person", "name" : "Janine"}'"""
request_dict = {
"before": self._convert_diff_dcoument(before),
"patch": patch.content,
}
if self._connected:
result = _finish_response(
requests.post(
self._patch_url(),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
auth=self._auth(),
)
)
else:
result = _finish_response(
requests.post(
self.server_url,
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=request_dict,
)
)
return json.loads(result)
def clonedb(
self, clone_source: str, newid: str, description: Optional[str] = None
) -> None:
"""Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the client does not connect to a database
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.clonedb("http://terminusdb.com/some_user/test_db", "my_test_db")
"""
self._check_connection()
if description is None:
description = f"New database {newid}"
rc_args = {"remote_url": clone_source, "label": newid, "comment": description}
_finish_response(
requests.post(
self._clone_url(newid),
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
json=rc_args,
auth=self._auth(),
)
)
def _generate_commit(
self, msg: Optional[str] = None, author: Optional[str] = None
) -> dict:
"""Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "<author>")
{'author': '<author>', 'message': '<message>'}
"""
if author:
mes_author = author
else:
mes_author = self._author
if not msg:
msg = f"Commit via python client {__version__}"
return {"author": mes_author, "message": msg}
def _auth(self):
# if https basic
if not self._use_token and self._connected and self._key and self.user:
return (self.user, self._key)
elif self._connected and self._jwt_token is not None:
return JWTAuth(self._jwt_token)
elif self._connected and self._api_token is not None:
return APITokenAuth(self._api_token)
elif self._connected:
return APITokenAuth(os.environ["TERMINUSDB_ACCESS_TOKEN"])
else:
raise RuntimeError("Client not connected.")
# TODO: remote_auth
def get_database(self, dbid: str) -> Optional[dict]:
"""
Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found
"""
self._check_connection(check_db=False)
for this_db in self.get_databases():
if this_db["name"] == dbid:
return this_db
return None
def get_databases(self) -> List[dict]:
"""
Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts
"""
self._check_connection(check_db=False)
result = requests.get(
self.api + "/",
headers={"user-agent": f"terminusdb-client-python/{__version__}"},
auth=self._auth(),
)
return json.loads(_finish_response(result))
def list_databases(self) -> List[Dict]:
"""
Returns a list of database names for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of str
"""
self._check_connection(check_db=False)
all_dbs = []
for data in self.get_databases():
all_dbs.append(data["name"])
return all_dbs
def _db_url_fragment(self):
if self._db == "_system":
return self._db
return f"{self._team}/{self._db}"
def _db_base(self, action: str):
return f"{self.api}/{action}/{self._db_url_fragment()}"
def _branch_url(self, branch_id: str):
base_url = self._repo_base("branch")
branch_id = urlparse.quote(branch_id)
return f"{base_url}/branch/{branch_id}"
def _repo_base(self, action: str):
return self._db_base(action) + f"/{self._repo}"
def _branch_base(self, action: str):
base = self._repo_base(action)
if self._repo == "_meta":
return base
if self._branch == "_commits":
return base + f"/{self._branch}"
elif self.ref:
return base + f"/commit/{self._ref}"
else:
return base + f"/branch/{self._branch}"
return base
def _query_url(self):
if self._db == "_system":
return self._db_base("woql")
return self._branch_base("woql")
def _class_frame_url(self):
if self._db == "_system":
return self._db_base("schema")
return self._branch_base("schema")
def _documents_url(self):
if self._db == "_system":
base_url = self._db_base("document")
else:
base_url = self._branch_base("document")
return base_url
def _triples_url(self, graph_type="instance"):
if self._db == "_system":
base_url = self._db_base("triples")
else:
base_url = self._branch_base("triples")
return f"{base_url}/{graph_type}"
def _clone_url(self, new_repo_id: str):
new_repo_id = urlparse.quote(new_repo_id)
return f"{self.api}/clone/{self._team}/{new_repo_id}"
def _cloneable_url(self):
crl = f"{self.server_url}/{self._team}/{self._db}"
return crl
def _pull_url(self):
return self._branch_base("pull")
def _fetch_url(self, remote_name: str):
furl = self._branch_base("fetch")
remote_name = urlparse.quote(remote_name)
return furl + "/" + remote_name + "/_commits"
def _rebase_url(self):
return self._branch_base("rebase")
def _reset_url(self):
return self._branch_base("reset")
def _optimize_url(self, path: str):
path = urlparse.quote(path)
return f"{self.api}/optimize/{path}"
def _squash_url(self):
return self._branch_base("squash")
def _diff_url(self):
return self._branch_base("diff")
def _patch_url(self):
return self._branch_base("patch")
def _push_url(self):
return self._branch_base("push")
def _db_url(self):
return self._db_base("db")
|
split_array_as_array
|
Group ndarray into ndarray by means of reshaping
Parameters
----------
values : ndarray_like, [index.size, ...]
Returns
-------
ndarray, [groups, group_size, ...]
values grouped by key
Raises
------
ValueError
This operation is only possible if index.uniform==True
|
"""grouping module"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
import itertools
import numpy as np
from numpy_indexed.index import as_index
import numpy_indexed as npi
__author__ = "Eelco Hoogendoorn"
__license__ = "LGPL"
__email__ = "hoogendoorn.eelco@gmail.com"
class GroupBy(object):
"""
GroupBy class
contains an index of keys, and extends the index functionality with grouping-specific functionality
"""
def __init__(self, keys, axis=0):
"""
Parameters
----------
keys : indexable object
sequence of keys to group by
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
self.index = as_index(keys, axis)
#forward interesting/'public' index properties
@property
def unique(self):
"""unique keys"""
return self.index.unique
@property
def count(self):
"""count of each unique key"""
return self.index.count
@property
def inverse(self):
"""mapping such that unique[inverse]==keys"""
return self.index.inverse
@property
def groups(self):
"""int, number of groups formed by the keys"""
return self.index.groups
#some different methods of chopping up a set of values by key
def split_iterable_as_iterable(self, values):
"""Group iterable into iterables, in the order of the keys
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
Memory consumption depends on the amount of sorting required
Worst case, if index.sorter[-1] = 0, we need to consume the entire value iterable,
before we can start yielding any output
But to the extent that the keys are already sorted, the grouping is lazy
"""
values = iter(enumerate(values))
cache = dict()
def get_value(ti):
try:
return cache.pop(ti)
except KeyError:
while True:
i, v = next(values)
if i==ti:
return v
cache[i] = v
s = iter(self.index.sorter)
for c in self.count:
yield (get_value(i) for i in itertools.islice(s, int(c)))
def split_iterable_as_unordered_iterable(self, values):
"""Group iterable into iterables, without regard for the ordering of self.index.unique
key-group tuples are yielded as soon as they are complete
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
tuple of key, and a list of corresponding items in values
Notes
-----
This approach is lazy, insofar as grouped values are close in their iterable
"""
from collections import defaultdict
cache = defaultdict(list)
count = self.count
unique = self.unique
key = (lambda i: unique[i]) if isinstance(unique, np.ndarray) else (lambda i: tuple(c[i] for c in unique))
for i,v in zip(self.inverse, values):
cache[i].append(v)
if len(cache[i]) == count[i]:
yield key(i), cache.pop(i)
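    # An illustrative sketch (hypothetical data, not from the original
    # module): groups are yielded as soon as all their members have been
    # seen, so ordering follows completion rather than self.unique.
    #
    #   g = GroupBy([1, 2, 1, 2])
    #   list(g.split_iterable_as_unordered_iterable('abcd'))
    #   # -> [(1, ['a', 'c']), (2, ['b', 'd'])]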
def split_sequence_as_iterable(self, values):
"""Group sequence into iterables
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
This is the preferred method if values has random access, but we don't want it completely in memory.
Like a big memory mapped file, for instance
"""
s = iter(self.index.sorter)
for c in self.count:
yield (values[i] for i in itertools.islice(s, int(c)))
# MASKED: split_array_as_array function (lines 140-161)
def split_array_as_list(self, values):
"""Group values as a list of arrays, or a jagged-array
Parameters
----------
values : ndarray, [keys, ...]
Returns
-------
list of length self.groups of ndarray, [key_count, ...]
"""
values = np.asarray(values)
values = values[self.index.sorter]
return np.split(values, self.index.slices[1:-1], axis=0)
def split(self, values):
"""some sensible defaults"""
try:
return self.split_array_as_array(values)
except ValueError:
# FIXME: change to iter in python 3?
return self.split_array_as_list(values)
def __call__(self, values):
"""not sure how i feel about this. explicit is better than implict?"""
return self.unique, self.split(values)
# ufunc based reduction methods. should they return unique keys by default?
def reduce(self, values, operator=np.add, axis=0, dtype=None):
"""Reduce the values over identical key groups, using the given ufunc
reduction is over the first axis, which should have elements corresponding to the keys
all other axes are treated independently for the sake of this reduction
Parameters
----------
values : ndarray, [keys, ...]
values to perform reduction over
operator : numpy.ufunc
a numpy ufunc, such as np.add or np.multiply
axis : int, optional
the axis to reduce over
dtype : output dtype
Returns
-------
ndarray, [groups, ...]
values reduced by operator over the key-groups
"""
values = np.take(values, self.index.sorter, axis=axis)
return operator.reduceat(values, self.index.start, axis=axis, dtype=dtype)
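    # A reduction sketch (hypothetical data): values are sorted by key, then
    # operator.reduceat collapses each contiguous key-run in one vectorized
    # call.
    #
    #   g = GroupBy(np.array([0, 1, 0, 1]))
    #   g.reduce(np.array([1, 10, 2, 20]))  # -> array([ 3, 30])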
def sum(self, values, axis=0, dtype=None):
"""compute the sum over each group
Parameters
----------
values : array_like, [keys, ...]
values to sum per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype)
def prod(self, values, axis=0, dtype=None):
"""compute the product over each group
Parameters
----------
values : array_like, [keys, ...]
values to multiply per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype, operator=np.multiply)
def mean(self, values, axis=0, weights=None, dtype=None):
"""compute the mean over each group
Parameters
----------
values : array_like, [keys, ...]
values to take average of per group
axis : int, optional
alternative reduction axis for values
weights : ndarray, [keys, ...], optional
weight to use for each value
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
if weights is None:
result = self.reduce(values, axis=axis, dtype=dtype)
shape = [1] * values.ndim
shape[axis] = self.groups
weights = self.count.reshape(shape)
else:
weights = np.asarray(weights)
result = self.reduce(values * weights, axis=axis, dtype=dtype)
weights = self.reduce(weights, axis=axis, dtype=dtype)
return self.unique, result / weights
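    # A mean sketch (hypothetical data): without weights, the group sums are
    # divided by the group counts; with weights, the weighted values and the
    # weights are each reduced per group before dividing.
    #
    #   g = GroupBy(np.array([0, 0, 1]))
    #   g.mean(np.array([1.0, 3.0, 5.0]))  # -> (array([0, 1]), array([2., 5.]))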
def var(self, values, axis=0, weights=None, dtype=None):
"""compute the variance over each group
Parameters
----------
values : array_like, [keys, ...]
values to take variance of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
unique, mean = self.mean(values, axis, weights, dtype)
err = values - mean.take(self.inverse, axis)
if weights is None:
shape = [1] * values.ndim
shape[axis] = self.groups
group_weights = self.count.reshape(shape)
var = self.reduce(err ** 2, axis=axis, dtype=dtype)
else:
weights = np.asarray(weights)
group_weights = self.reduce(weights, axis=axis, dtype=dtype)
var = self.reduce(weights * err ** 2, axis=axis, dtype=dtype)
return unique, var / group_weights
def std(self, values, axis=0, weights=None, dtype=None):
"""standard deviation over each group
Parameters
----------
values : array_like, [keys, ...]
values to take standard deviation of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
unique, var = self.var(values, axis, weights, dtype)
return unique, np.sqrt(var)
def median(self, values, axis=0, average=True):
"""compute the median value over each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the median of per group
axis : int, optional
alternative reduction axis for values
average : bool, optional
when average is true, the average of the two central values is taken for groups with an even key-count
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
mid_2 = self.index.start + self.index.stop
hi = mid_2 // 2
lo = (mid_2 - 1) // 2
#need this indirection for lex-index compatibility
sorted_group_rank_per_key = self.index.sorted_group_rank_per_key
def median1d(slc):
#place values at correct keys; preconditions the upcoming lexsort
slc = slc[self.index.sorter]
#refine value sorting within each keygroup
sorter = np.lexsort((slc, sorted_group_rank_per_key))
slc = slc[sorter]
return (slc[lo]+slc[hi]) / 2 if average else slc[hi]
values = np.asarray(values)
if values.ndim>1: #is trying to skip apply_along_axis somewhat premature optimization?
values = np.apply_along_axis(median1d, axis, values)
else:
values = median1d(values)
return self.unique, values
def mode(self, values, weights=None):
"""compute the mode within each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the mode of per group
weights : array_like, [keys], float, optional
optional weight associated with each entry in values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
if weights is None:
unique, weights = npi.count((self.index.sorted_group_rank_per_key, values))
else:
unique, weights = npi.group_by((self.index.sorted_group_rank_per_key, values)).sum(weights)
x, bin = npi.group_by(unique[0]).argmax(weights)
return x, unique[1][bin]
def min(self, values, axis=0):
"""return the minimum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take minimum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.minimum, axis)
def max(self, values, axis=0):
"""return the maximum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take maximum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.maximum, axis)
def first(self, values, axis=0):
"""return values at first occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the first value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.start], axis)
def last(self, values, axis=0):
"""return values at last occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the last value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.stop-1], axis)
def any(self, values, axis=0):
"""compute if any item evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], bool
value array, reduced over groups
"""
values = np.asarray(values)
if values.dtype != bool:
values = values != 0
return self.unique, self.reduce(values, axis=axis) > 0
def all(self, values, axis=0):
"""compute if all items evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], bool
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, operator=np.multiply) != 0
def argmin(self, values):
"""return the index into values corresponding to the minimum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmin of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmin : ndarray, [groups]
index into value array, representing the argmin per group
"""
keys, minima = self.min(values)
minima = minima[self.inverse]
# select the first occurrence of the minimum in each group
index = as_index((self.inverse, values == minima))
return keys, index.sorter[index.start[-self.groups:]]
def argmax(self, values):
"""return the index into values corresponding to the maximum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmax of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmax : ndarray, [groups]
index into value array, representing the argmax per group
"""
keys, maxima = self.max(values)
maxima = maxima[self.inverse]
# select the first occurrence of the maximum in each group
index = as_index((self.inverse, values == maxima))
return keys, index.sorter[index.start[-self.groups:]]
#implement iter interface? could simply do zip( group_by(keys)(values)), no?
def group_by(keys, values=None, reduction=None, axis=0):
"""construct a grouping object on the given keys, optionally performing the given reduction on the given values
Parameters
----------
keys : indexable object
keys to group by
values : array_like, optional
sequence of values, of the same length as keys
if a reduction function is provided, the given values are reduced by key
if no reduction is provided, the given values are grouped and split by key
reduction : lambda, optional
reduction function to apply to the values in each group
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
Returns
-------
iterable
if values is None, a GroupBy object of the given keys object
if reduction is None, a tuple of a sequence of unique keys and a sequence of grouped values
else, a sequence of tuples of unique keys and reductions of values over that key-group
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
g = GroupBy(keys, axis)
if values is None:
return g
groups = g.split(values)
if reduction is None:
return g.unique, groups
return [(key, reduction(group)) for key, group in zip(g.unique, groups)]
__all__ = ['group_by']
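# A usage sketch for the group_by entry point (hypothetical data, not part of
# the original module):
#
#   keys = np.array(['a', 'b', 'a', 'b'])
#   values = np.array([1, 2, 3, 4])
#   unique, grouped = group_by(keys, values)   # split values by key
#   group_by(keys, values, reduction=np.sum)   # -> [('a', 4), ('b', 6)]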
|
def split_array_as_array(self, values):
"""Group ndarray into ndarray by means of reshaping
Parameters
----------
values : ndarray_like, [index.size, ...]
Returns
-------
ndarray, [groups, group_size, ...]
values grouped by key
Raises
------
ValueError
This operation is only possible if index.uniform==True
"""
if not self.index.uniform:
raise ValueError("Array can only be split as array if all groups have the same size")
values = np.asarray(values)
values = values[self.index.sorter]
return values.reshape(self.groups, -1, *values.shape[1:])
| 140
| 161
|
"""grouping module"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
import itertools
import numpy as np
from numpy_indexed.index import as_index
import numpy_indexed as npi
__author__ = "Eelco Hoogendoorn"
__license__ = "LGPL"
__email__ = "hoogendoorn.eelco@gmail.com"
class GroupBy(object):
"""
GroupBy class
contains an index of keys, and extends the index functionality with grouping-specific functionality
"""
def __init__(self, keys, axis=0):
"""
Parameters
----------
keys : indexable object
sequence of keys to group by
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
self.index = as_index(keys, axis)
#forward interesting/'public' index properties
@property
def unique(self):
"""unique keys"""
return self.index.unique
@property
def count(self):
"""count of each unique key"""
return self.index.count
@property
def inverse(self):
"""mapping such that unique[inverse]==keys"""
return self.index.inverse
@property
def groups(self):
"""int, number of groups formed by the keys"""
return self.index.groups
#some different methods of chopping up a set of values by key
def split_iterable_as_iterable(self, values):
"""Group iterable into iterables, in the order of the keys
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
Memory consumption depends on the amount of sorting required
Worst case, if index.sorter[-1] = 0, we need to consume the entire value iterable,
before we can start yielding any output
But to the extent that the keys are already sorted, the grouping is lazy
"""
values = iter(enumerate(values))
cache = dict()
def get_value(ti):
try:
return cache.pop(ti)
except KeyError:
while True:
i, v = next(values)
if i==ti:
return v
cache[i] = v
s = iter(self.index.sorter)
for c in self.count:
yield (get_value(i) for i in itertools.islice(s, int(c)))
def split_iterable_as_unordered_iterable(self, values):
"""Group iterable into iterables, without regard for the ordering of self.index.unique
key-group tuples are yielded as soon as they are complete
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
tuple of key, and a list of corresponding items in values
Notes
-----
This approach is lazy, insofar as grouped values are close in their iterable
"""
from collections import defaultdict
cache = defaultdict(list)
count = self.count
unique = self.unique
key = (lambda i: unique[i]) if isinstance(unique, np.ndarray) else (lambda i: tuple(c[i] for c in unique))
for i,v in zip(self.inverse, values):
cache[i].append(v)
if len(cache[i]) == count[i]:
yield key(i), cache.pop(i)
def split_sequence_as_iterable(self, values):
"""Group sequence into iterables
Parameters
----------
values : sequence of length equal to keys
sequence of values to be grouped; must support random access
Yields
------
iterable of items in values
Notes
-----
This is the preferred method if values has random access, but we don't want it completely in memory.
Like a big memory mapped file, for instance
"""
s = iter(self.index.sorter)
for c in self.count:
yield (values[i] for i in itertools.islice(s, int(c)))
def split_array_as_array(self, values):
"""Group ndarray into ndarray by means of reshaping
Parameters
----------
values : ndarray_like, [index.size, ...]
Returns
-------
ndarray, [groups, group_size, ...]
values grouped by key
Raises
------
ValueError
This operation is only possible if index.uniform==True
"""
if not self.index.uniform:
raise ValueError("Array can only be split as array if all groups have the same size")
values = np.asarray(values)
values = values[self.index.sorter]
return values.reshape(self.groups, -1, *values.shape[1:])
def split_array_as_list(self, values):
"""Group values as a list of arrays, or a jagged-array
Parameters
----------
values : ndarray, [keys, ...]
Returns
-------
list of length self.groups of ndarray, [key_count, ...]
"""
values = np.asarray(values)
values = values[self.index.sorter]
return np.split(values, self.index.slices[1:-1], axis=0)
def split(self, values):
"""split values by key; returns an ndarray if all groups are uniform, else a list of arrays"""
try:
return self.split_array_as_array(values)
except ValueError:
# non-uniform groups cannot be reshaped; fall back to a jagged list of arrays
return self.split_array_as_list(values)
def __call__(self, values):
"""not sure how i feel about this. explicit is better than implict?"""
return self.unique, self.split(values)
# ufunc based reduction methods. should they return unique keys by default?
def reduce(self, values, operator=np.add, axis=0, dtype=None):
"""Reduce the values over identical key groups, using the given ufunc
reduction is over the first axis, which should have elements corresponding to the keys
all other axes are treated independently for the sake of this reduction
Parameters
----------
values : ndarray, [keys, ...]
values to perform reduction over
operator : numpy.ufunc
a numpy ufunc, such as np.add or np.multiply
axis : int, optional
the axis to reduce over
dtype : output dtype
Returns
-------
ndarray, [groups, ...]
values reduced by operator over the key-groups
"""
values = np.take(values, self.index.sorter, axis=axis)
return operator.reduceat(values, self.index.start, axis=axis, dtype=dtype)
def sum(self, values, axis=0, dtype=None):
"""compute the sum over each group
Parameters
----------
values : array_like, [keys, ...]
values to sum per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype)
def prod(self, values, axis=0, dtype=None):
"""compute the product over each group
Parameters
----------
values : array_like, [keys, ...]
values to multiply per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype, operator=np.multiply)
def mean(self, values, axis=0, weights=None, dtype=None):
"""compute the mean over each group
Parameters
----------
values : array_like, [keys, ...]
values to take average of per group
axis : int, optional
alternative reduction axis for values
weights : ndarray, [keys, ...], optional
weight to use for each value
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
if weights is None:
result = self.reduce(values, axis=axis, dtype=dtype)
shape = [1] * values.ndim
shape[axis] = self.groups
weights = self.count.reshape(shape)
else:
weights = np.asarray(weights)
result = self.reduce(values * weights, axis=axis, dtype=dtype)
weights = self.reduce(weights, axis=axis, dtype=dtype)
return self.unique, result / weights
def var(self, values, axis=0, weights=None, dtype=None):
"""compute the variance over each group
Parameters
----------
values : array_like, [keys, ...]
values to take variance of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
unique, mean = self.mean(values, axis, weights, dtype)
err = values - mean.take(self.inverse, axis)
if weights is None:
shape = [1] * values.ndim
shape[axis] = self.groups
group_weights = self.count.reshape(shape)
var = self.reduce(err ** 2, axis=axis, dtype=dtype)
else:
weights = np.asarray(weights)
group_weights = self.reduce(weights, axis=axis, dtype=dtype)
var = self.reduce(weights * err ** 2, axis=axis, dtype=dtype)
return unique, var / group_weights
def std(self, values, axis=0, weights=None, dtype=None):
"""standard deviation over each group
Parameters
----------
values : array_like, [keys, ...]
values to take standard deviation of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
unique, var = self.var(values, axis, weights, dtype)
return unique, np.sqrt(var)
def median(self, values, axis=0, average=True):
"""compute the median value over each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the median of per group
axis : int, optional
alternative reduction axis for values
average : bool, optional
when average is true, the average of the two central values is taken for groups with an even key-count
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
mid_2 = self.index.start + self.index.stop
hi = mid_2 // 2
lo = (mid_2 - 1) // 2
#need this indirection for lex-index compatibility
sorted_group_rank_per_key = self.index.sorted_group_rank_per_key
def median1d(slc):
#place values at correct keys; preconditions the upcoming lexsort
slc = slc[self.index.sorter]
#refine value sorting within each keygroup
sorter = np.lexsort((slc, sorted_group_rank_per_key))
slc = slc[sorter]
return (slc[lo]+slc[hi]) / 2 if average else slc[hi]
values = np.asarray(values)
if values.ndim>1: #is trying to skip apply_along_axis somewhat premature optimization?
values = np.apply_along_axis(median1d, axis, values)
else:
values = median1d(values)
return self.unique, values
def mode(self, values, weights=None):
"""compute the mode within each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the mode of per group
weights : array_like, [keys], float, optional
optional weight associated with each entry in values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
if weights is None:
unique, weights = npi.count((self.index.sorted_group_rank_per_key, values))
else:
unique, weights = npi.group_by((self.index.sorted_group_rank_per_key, values)).sum(weights)
x, bin = npi.group_by(unique[0]).argmax(weights)
return x, unique[1][bin]
def min(self, values, axis=0):
"""return the minimum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take minimum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.minimum, axis)
def max(self, values, axis=0):
"""return the maximum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take maximum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.maximum, axis)
def first(self, values, axis=0):
"""return values at first occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the first value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.start], axis)
def last(self, values, axis=0):
"""return values at last occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the last value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.stop-1], axis)
def any(self, values, axis=0):
"""compute if any item evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
if values.dtype != bool:
values = values != 0
return self.unique, self.reduce(values, axis=axis) > 0
def all(self, values, axis=0):
"""compute if all items evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, operator=np.multiply) != 0
def argmin(self, values):
"""return the index into values corresponding to the minimum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmin of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmin : ndarray, [groups]
index into value array, representing the argmin per group
"""
keys, minima = self.min(values)
minima = minima[self.inverse]
# select the first occurrence of the minimum in each group
index = as_index((self.inverse, values == minima))
return keys, index.sorter[index.start[-self.groups:]]
def argmax(self, values):
"""return the index into values corresponding to the maximum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmax of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmax : ndarray, [groups]
index into value array, representing the argmax per group
"""
keys, maxima = self.max(values)
maxima = maxima[self.inverse]
# select the first occurrence of the maximum in each group
index = as_index((self.inverse, values == maxima))
return keys, index.sorter[index.start[-self.groups:]]
#implement iter interface? could simply do zip( group_by(keys)(values)), no?
def group_by(keys, values=None, reduction=None, axis=0):
"""construct a grouping object on the given keys, optionally performing the given reduction on the given values
Parameters
----------
keys : indexable object
keys to group by
values : array_like, optional
sequence of values, of the same length as keys
if a reduction function is provided, the given values are reduced by key
if no reduction is provided, the given values are grouped and split by key
reduction : callable, optional
reduction function to apply to the values in each group
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
Returns
-------
iterable
if values is None, a GroupBy object of the given keys object
if reduction is None, a tuple of a sequence of unique keys and a sequence of grouped values
else, a sequence of tuples of unique keys and reductions of values over that key-group
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
g = GroupBy(keys, axis)
if values is None:
return g
groups = g.split(values)
if reduction is None:
return g.unique, groups
return [(key, reduction(group)) for key, group in zip(g.unique, groups)]
__all__ = ['group_by']
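# Hedged example (added for illustration): the reduction methods defined on
# GroupBy each return a (unique_keys, reduced_values) pair.
import numpy as np
import numpy_indexed as npi
keys = np.array(['a', 'b', 'a', 'b'])
values = np.array([1.0, 5.0, 3.0, 7.0])
g = npi.group_by(keys)
print(g.sum(values))    # (['a', 'b'], [ 4., 12.])
print(g.mean(values))   # (['a', 'b'], [2., 6.])
print(g.first(values))  # value at the first occurrence of each key: [1., 5.]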
|
split_array_as_list
|
Group values as a list of arrays, or a jagged-array
Parameters
----------
values : ndarray, [keys, ...]
Returns
-------
list of length self.groups of ndarray, [key_count, ...]
|
"""grouping module"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
import itertools
import numpy as np
from numpy_indexed.index import as_index
import numpy_indexed as npi
__author__ = "Eelco Hoogendoorn"
__license__ = "LGPL"
__email__ = "hoogendoorn.eelco@gmail.com"
class GroupBy(object):
"""
GroupBy class
contains an index of keys, and extends the index functionality with grouping-specific functionality
"""
def __init__(self, keys, axis=0):
"""
Parameters
----------
keys : indexable object
sequence of keys to group by
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
self.index = as_index(keys, axis)
#forward interesting/'public' index properties
@property
def unique(self):
"""unique keys"""
return self.index.unique
@property
def count(self):
"""count of each unique key"""
return self.index.count
@property
def inverse(self):
"""mapping such that unique[inverse]==keys"""
return self.index.inverse
@property
def groups(self):
"""int, number of groups formed by the keys"""
return self.index.groups
#some different methods of chopping up a set of values by key
def split_iterable_as_iterable(self, values):
"""Group iterable into iterables, in the order of the keys
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
Memory consumption depends on the amount of sorting required
Worst case, if index.sorter[-1] = 0, we need to consume the entire value iterable,
before we can start yielding any output
But to the extent that the keys are already sorted, the grouping is lazy
"""
values = iter(enumerate(values))
cache = dict()
def get_value(ti):
try:
return cache.pop(ti)
except KeyError:
while True:
i, v = next(values)
if i==ti:
return v
cache[i] = v
s = iter(self.index.sorter)
for c in self.count:
yield (get_value(i) for i in itertools.islice(s, int(c)))
def split_iterable_as_unordered_iterable(self, values):
"""Group iterable into iterables, without regard for the ordering of self.index.unique
key-group tuples are yielded as soon as they are complete
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
tuple of key, and a list of corresponding items in values
Notes
-----
This approach is lazy to the extent that grouped values appear close together in the input iterable
"""
from collections import defaultdict
cache = defaultdict(list)
count = self.count
unique = self.unique
key = (lambda i: unique[i]) if isinstance(unique, np.ndarray) else (lambda i: tuple(c[i] for c in unique))
for i,v in zip(self.inverse, values):
cache[i].append(v)
if len(cache[i]) == count[i]:
yield key(i), cache.pop(i)
def split_sequence_as_iterable(self, values):
"""Group sequence into iterables
Parameters
----------
values : sequence of length equal to keys
sequence of values to be grouped; must support random access
Yields
------
iterable of items in values
Notes
-----
This is the preferred method if values has random access, but we don't want it completely in memory.
Like a big memory mapped file, for instance
"""
s = iter(self.index.sorter)
for c in self.count:
yield (values[i] for i in itertools.islice(s, int(c)))
def split_array_as_array(self, values):
"""Group ndarray into ndarray by means of reshaping
Parameters
----------
values : ndarray_like, [index.size, ...]
Returns
-------
ndarray, [groups, group_size, ...]
values grouped by key
Raises
------
ValueError
This operation is only possible if index.uniform==True
"""
if not self.index.uniform:
raise ValueError("Array can only be split as array if all groups have the same size")
values = np.asarray(values)
values = values[self.index.sorter]
return values.reshape(self.groups, -1, *values.shape[1:])
# MASKED: split_array_as_list function (lines 163-176)
def split(self, values):
"""split values by key; returns an ndarray if all groups are uniform, else a list of arrays"""
try:
return self.split_array_as_array(values)
except ValueError:
# non-uniform groups cannot be reshaped; fall back to a jagged list of arrays
return self.split_array_as_list(values)
def __call__(self, values):
"""not sure how i feel about this. explicit is better than implict?"""
return self.unique, self.split(values)
# ufunc based reduction methods. should they return unique keys by default?
def reduce(self, values, operator=np.add, axis=0, dtype=None):
"""Reduce the values over identical key groups, using the given ufunc
reduction is over the first axis, which should have elements corresponding to the keys
all other axes are treated independently for the sake of this reduction
Parameters
----------
values : ndarray, [keys, ...]
values to perform reduction over
operator : numpy.ufunc
a numpy ufunc, such as np.add or np.multiply
axis : int, optional
the axis to reduce over
dtype : output dtype
Returns
-------
ndarray, [groups, ...]
values reduced by operator over the key-groups
"""
values = np.take(values, self.index.sorter, axis=axis)
return operator.reduceat(values, self.index.start, axis=axis, dtype=dtype)
def sum(self, values, axis=0, dtype=None):
"""compute the sum over each group
Parameters
----------
values : array_like, [keys, ...]
values to sum per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype)
def prod(self, values, axis=0, dtype=None):
"""compute the product over each group
Parameters
----------
values : array_like, [keys, ...]
values to multiply per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype, operator=np.multiply)
def mean(self, values, axis=0, weights=None, dtype=None):
"""compute the mean over each group
Parameters
----------
values : array_like, [keys, ...]
values to take average of per group
axis : int, optional
alternative reduction axis for values
weights : ndarray, [keys, ...], optional
weight to use for each value
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
if weights is None:
result = self.reduce(values, axis=axis, dtype=dtype)
shape = [1] * values.ndim
shape[axis] = self.groups
weights = self.count.reshape(shape)
else:
weights = np.asarray(weights)
result = self.reduce(values * weights, axis=axis, dtype=dtype)
weights = self.reduce(weights, axis=axis, dtype=dtype)
return self.unique, result / weights
def var(self, values, axis=0, weights=None, dtype=None):
"""compute the variance over each group
Parameters
----------
values : array_like, [keys, ...]
values to take variance of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
unique, mean = self.mean(values, axis, weights, dtype)
err = values - mean.take(self.inverse, axis)
if weights is None:
shape = [1] * values.ndim
shape[axis] = self.groups
group_weights = self.count.reshape(shape)
var = self.reduce(err ** 2, axis=axis, dtype=dtype)
else:
weights = np.asarray(weights)
group_weights = self.reduce(weights, axis=axis, dtype=dtype)
var = self.reduce(weights * err ** 2, axis=axis, dtype=dtype)
return unique, var / group_weights
def std(self, values, axis=0, weights=None, dtype=None):
"""standard deviation over each group
Parameters
----------
values : array_like, [keys, ...]
values to take standard deviation of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
unique, var = self.var(values, axis, weights, dtype)
return unique, np.sqrt(var)
def median(self, values, axis=0, average=True):
"""compute the median value over each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the median of per group
axis : int, optional
alternative reduction axis for values
average : bool, optional
when average is true, the average of the two central values is taken for groups with an even key-count
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
mid_2 = self.index.start + self.index.stop
hi = mid_2 // 2
lo = (mid_2 - 1) // 2
#need this indirection for lex-index compatibility
sorted_group_rank_per_key = self.index.sorted_group_rank_per_key
def median1d(slc):
#place values at correct keys; preconditions the upcoming lexsort
slc = slc[self.index.sorter]
#refine value sorting within each keygroup
sorter = np.lexsort((slc, sorted_group_rank_per_key))
slc = slc[sorter]
return (slc[lo]+slc[hi]) / 2 if average else slc[hi]
values = np.asarray(values)
if values.ndim>1: #is trying to skip apply_along_axis somewhat premature optimization?
values = np.apply_along_axis(median1d, axis, values)
else:
values = median1d(values)
return self.unique, values
def mode(self, values, weights=None):
"""compute the mode within each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the mode of per group
weights : array_like, [keys], float, optional
optional weight associated with each entry in values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
if weights is None:
unique, weights = npi.count((self.index.sorted_group_rank_per_key, values))
else:
unique, weights = npi.group_by((self.index.sorted_group_rank_per_key, values)).sum(weights)
x, bin = npi.group_by(unique[0]).argmax(weights)
return x, unique[1][bin]
def min(self, values, axis=0):
"""return the minimum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take minimum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.minimum, axis)
def max(self, values, axis=0):
"""return the maximum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take maximum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.maximum, axis)
def first(self, values, axis=0):
"""return values at first occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the first value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.start], axis)
def last(self, values, axis=0):
"""return values at last occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the last value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.stop-1], axis)
def any(self, values, axis=0):
"""compute if any item evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
if values.dtype != bool:
values = values != 0
return self.unique, self.reduce(values, axis=axis) > 0
def all(self, values, axis=0):
"""compute if all items evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, operator=np.multiply) != 0
def argmin(self, values):
"""return the index into values corresponding to the minimum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmin of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmin : ndarray, [groups]
index into value array, representing the argmin per group
"""
keys, minima = self.min(values)
minima = minima[self.inverse]
# select the first occurrence of the minimum in each group
index = as_index((self.inverse, values == minima))
return keys, index.sorter[index.start[-self.groups:]]
def argmax(self, values):
"""return the index into values corresponding to the maximum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmax of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmax : ndarray, [groups]
index into value array, representing the argmax per group
"""
keys, maxima = self.max(values)
maxima = maxima[self.inverse]
# select the first occurrence of the maximum in each group
index = as_index((self.inverse, values == maxima))
return keys, index.sorter[index.start[-self.groups:]]
#implement iter interface? could simply do zip( group_by(keys)(values)), no?
def group_by(keys, values=None, reduction=None, axis=0):
"""construct a grouping object on the given keys, optionally performing the given reduction on the given values
Parameters
----------
keys : indexable object
keys to group by
values : array_like, optional
sequence of values, of the same length as keys
if a reduction function is provided, the given values are reduced by key
if no reduction is provided, the given values are grouped and split by key
reduction : callable, optional
reduction function to apply to the values in each group
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
Returns
-------
iterable
if values is None, a GroupBy object of the given keys object
if reduction is None, a tuple of a sequence of unique keys and a sequence of grouped values
else, a sequence of tuples of unique keys and reductions of values over that key-group
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
g = GroupBy(keys, axis)
if values is None:
return g
groups = g.split(values)
if reduction is None:
return g.unique, groups
return [(key, reduction(group)) for key, group in zip(g.unique, groups)]
__all__ = ['group_by']
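# Sketch (not in the source): extrema per group via the methods above; argmax
# returns the index into values of the first occurrence of each group maximum.
import numpy as np
import numpy_indexed as npi
keys = np.array([0, 1, 0, 1])
values = np.array([3., 8., 1., 4.])
g = npi.group_by(keys)
print(g.min(values))     # ([0, 1], [1., 4.])
print(g.max(values))     # ([0, 1], [3., 8.])
print(g.argmax(values))  # ([0, 1], [0, 1]): index of each group's maximum in values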
|
def split_array_as_list(self, values):
"""Group values as a list of arrays, or a jagged-array
Parameters
----------
values : ndarray, [keys, ...]
Returns
-------
list of length self.groups of ndarray, [key_count, ...]
"""
values = np.asarray(values)
values = values[self.index.sorter]
return np.split(values, self.index.slices[1:-1], axis=0)
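# Usage sketch for the implementation above (illustrative assumption): jagged
# splitting works for groups of unequal size, unlike the reshape variant.
import numpy as np
from numpy_indexed import group_by
keys = np.array([0, 0, 1, 0, 1])
values = np.arange(5) * 10             # [0, 10, 20, 30, 40]
g = group_by(keys)
print(g.split_array_as_list(values))   # [array([ 0, 10, 30]), array([20, 40])]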
| 163
| 176
|
"""grouping module"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
import itertools
import numpy as np
from numpy_indexed.index import as_index
import numpy_indexed as npi
__author__ = "Eelco Hoogendoorn"
__license__ = "LGPL"
__email__ = "hoogendoorn.eelco@gmail.com"
class GroupBy(object):
"""
GroupBy class
contains an index of keys, and extends the index functionality with grouping-specific functionality
"""
def __init__(self, keys, axis=0):
"""
Parameters
----------
keys : indexable object
sequence of keys to group by
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
self.index = as_index(keys, axis)
#forward interesting/'public' index properties
@property
def unique(self):
"""unique keys"""
return self.index.unique
@property
def count(self):
"""count of each unique key"""
return self.index.count
@property
def inverse(self):
"""mapping such that unique[inverse]==keys"""
return self.index.inverse
@property
def groups(self):
"""int, number of groups formed by the keys"""
return self.index.groups
#some different methods of chopping up a set of values by key
def split_iterable_as_iterable(self, values):
"""Group iterable into iterables, in the order of the keys
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
Memory consumption depends on the amount of sorting required
Worst case, if index.sorter[-1] = 0, we need to consume the entire value iterable,
before we can start yielding any output
But to the extent that the keys are already sorted, the grouping is lazy
"""
values = iter(enumerate(values))
cache = dict()
def get_value(ti):
try:
return cache.pop(ti)
except KeyError:
while True:
i, v = next(values)
if i==ti:
return v
cache[i] = v
s = iter(self.index.sorter)
for c in self.count:
yield (get_value(i) for i in itertools.islice(s, int(c)))
def split_iterable_as_unordered_iterable(self, values):
"""Group iterable into iterables, without regard for the ordering of self.index.unique
key-group tuples are yielded as soon as they are complete
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
tuple of key, and a list of corresponding items in values
Notes
-----
This approach is lazy to the extent that grouped values appear close together in the input iterable
"""
from collections import defaultdict
cache = defaultdict(list)
count = self.count
unique = self.unique
key = (lambda i: unique[i]) if isinstance(unique, np.ndarray) else (lambda i: tuple(c[i] for c in unique))
for i,v in zip(self.inverse, values):
cache[i].append(v)
if len(cache[i]) == count[i]:
yield key(i), cache.pop(i)
def split_sequence_as_iterable(self, values):
"""Group sequence into iterables
Parameters
----------
values : sequence of length equal to keys
sequence of values to be grouped; must support random access
Yields
------
iterable of items in values
Notes
-----
This is the preferred method if values has random access, but we don't want it completely in memory.
Like a big memory mapped file, for instance
"""
s = iter(self.index.sorter)
for c in self.count:
yield (values[i] for i in itertools.islice(s, int(c)))
def split_array_as_array(self, values):
"""Group ndarray into ndarray by means of reshaping
Parameters
----------
values : ndarray_like, [index.size, ...]
Returns
-------
ndarray, [groups, group_size, ...]
values grouped by key
Raises
------
ValueError
This operation is only possible if index.uniform==True
"""
if not self.index.uniform:
raise ValueError("Array can only be split as array if all groups have the same size")
values = np.asarray(values)
values = values[self.index.sorter]
return values.reshape(self.groups, -1, *values.shape[1:])
def split_array_as_list(self, values):
"""Group values as a list of arrays, or a jagged-array
Parameters
----------
values : ndarray, [keys, ...]
Returns
-------
list of length self.groups of ndarray, [key_count, ...]
"""
values = np.asarray(values)
values = values[self.index.sorter]
return np.split(values, self.index.slices[1:-1], axis=0)
def split(self, values):
"""split values by key; returns an ndarray if all groups are uniform, else a list of arrays"""
try:
return self.split_array_as_array(values)
except ValueError:
# non-uniform groups cannot be reshaped; fall back to a jagged list of arrays
return self.split_array_as_list(values)
def __call__(self, values):
"""not sure how i feel about this. explicit is better than implict?"""
return self.unique, self.split(values)
# ufunc based reduction methods. should they return unique keys by default?
def reduce(self, values, operator=np.add, axis=0, dtype=None):
"""Reduce the values over identical key groups, using the given ufunc
reduction is over the first axis, which should have elements corresponding to the keys
all other axes are treated independently for the sake of this reduction
Parameters
----------
values : ndarray, [keys, ...]
values to perform reduction over
operator : numpy.ufunc
a numpy ufunc, such as np.add or np.multiply
axis : int, optional
the axis to reduce over
dtype : output dtype
Returns
-------
ndarray, [groups, ...]
values reduced by operator over the key-groups
"""
values = np.take(values, self.index.sorter, axis=axis)
return operator.reduceat(values, self.index.start, axis=axis, dtype=dtype)
def sum(self, values, axis=0, dtype=None):
"""compute the sum over each group
Parameters
----------
values : array_like, [keys, ...]
values to sum per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype)
def prod(self, values, axis=0, dtype=None):
"""compute the product over each group
Parameters
----------
values : array_like, [keys, ...]
values to multiply per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype, operator=np.multiply)
def mean(self, values, axis=0, weights=None, dtype=None):
"""compute the mean over each group
Parameters
----------
values : array_like, [keys, ...]
values to take average of per group
axis : int, optional
alternative reduction axis for values
weights : ndarray, [keys, ...], optional
weight to use for each value
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
if weights is None:
result = self.reduce(values, axis=axis, dtype=dtype)
shape = [1] * values.ndim
shape[axis] = self.groups
weights = self.count.reshape(shape)
else:
weights = np.asarray(weights)
result = self.reduce(values * weights, axis=axis, dtype=dtype)
weights = self.reduce(weights, axis=axis, dtype=dtype)
return self.unique, result / weights
def var(self, values, axis=0, weights=None, dtype=None):
"""compute the variance over each group
Parameters
----------
values : array_like, [keys, ...]
values to take variance of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
unique, mean = self.mean(values, axis, weights, dtype)
err = values - mean.take(self.inverse, axis)
if weights is None:
shape = [1] * values.ndim
shape[axis] = self.groups
group_weights = self.count.reshape(shape)
var = self.reduce(err ** 2, axis=axis, dtype=dtype)
else:
weights = np.asarray(weights)
group_weights = self.reduce(weights, axis=axis, dtype=dtype)
var = self.reduce(weights * err ** 2, axis=axis, dtype=dtype)
return unique, var / group_weights
def std(self, values, axis=0, weights=None, dtype=None):
"""standard deviation over each group
Parameters
----------
values : array_like, [keys, ...]
values to take standard deviation of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
unique, var = self.var(values, axis, weights, dtype)
return unique, np.sqrt(var)
def median(self, values, axis=0, average=True):
"""compute the median value over each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the median of per group
axis : int, optional
alternative reduction axis for values
average : bool, optional
when average is true, the average of the two central values is taken for groups with an even key-count
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
mid_2 = self.index.start + self.index.stop
hi = mid_2 // 2
lo = (mid_2 - 1) // 2
#need this indirection for lex-index compatibility
sorted_group_rank_per_key = self.index.sorted_group_rank_per_key
def median1d(slc):
#place values at correct keys; preconditions the upcoming lexsort
slc = slc[self.index.sorter]
#refine value sorting within each keygroup
sorter = np.lexsort((slc, sorted_group_rank_per_key))
slc = slc[sorter]
return (slc[lo]+slc[hi]) / 2 if average else slc[hi]
values = np.asarray(values)
if values.ndim>1: #is trying to skip apply_along_axis somewhat premature optimization?
values = np.apply_along_axis(median1d, axis, values)
else:
values = median1d(values)
return self.unique, values
def mode(self, values, weights=None):
"""compute the mode within each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the mode of per group
weights : array_like, [keys], float, optional
optional weight associated with each entry in values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
if weights is None:
unique, weights = npi.count((self.index.sorted_group_rank_per_key, values))
else:
unique, weights = npi.group_by((self.index.sorted_group_rank_per_key, values)).sum(weights)
x, bin = npi.group_by(unique[0]).argmax(weights)
return x, unique[1][bin]
def min(self, values, axis=0):
"""return the minimum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take minimum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.minimum, axis)
def max(self, values, axis=0):
"""return the maximum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take maximum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.maximum, axis)
def first(self, values, axis=0):
"""return values at first occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the first value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.start], axis)
def last(self, values, axis=0):
"""return values at last occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the last value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.stop-1], axis)
def any(self, values, axis=0):
"""compute if any item evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
if values.dtype != bool:
values = values != 0
return self.unique, self.reduce(values, axis=axis) > 0
def all(self, values, axis=0):
"""compute if all items evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, operator=np.multiply) != 0
def argmin(self, values):
"""return the index into values corresponding to the minimum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmin of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmin : ndarray, [groups]
index into value array, representing the argmin per group
"""
keys, minima = self.min(values)
minima = minima[self.inverse]
# select the first occurrence of the minimum in each group
index = as_index((self.inverse, values == minima))
return keys, index.sorter[index.start[-self.groups:]]
def argmax(self, values):
"""return the index into values corresponding to the maximum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmax of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmax : ndarray, [groups]
index into value array, representing the argmax per group
"""
keys, maxima = self.max(values)
maxima = maxima[self.inverse]
# select the first occurrence of the maximum in each group
index = as_index((self.inverse, values == maxima))
return keys, index.sorter[index.start[-self.groups:]]
#implement iter interface? could simply do zip( group_by(keys)(values)), no?
def group_by(keys, values=None, reduction=None, axis=0):
"""construct a grouping object on the given keys, optionally performing the given reduction on the given values
Parameters
----------
keys : indexable object
keys to group by
values : array_like, optional
sequence of values, of the same length as keys
if a reduction function is provided, the given values are reduced by key
if no reduction is provided, the given values are grouped and split by key
reduction : callable, optional
reduction function to apply to the values in each group
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
Returns
-------
iterable
if values is None, a GroupBy object of the given keys object
if reduction is None, a tuple of a sequence of unique keys and a sequence of grouped values
else, a sequence of tuples of unique keys and reductions of values over that key-group
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
g = GroupBy(keys, axis)
if values is None:
return g
groups = g.split(values)
if reduction is None:
return g.unique, groups
return [(key, reduction(group)) for key, group in zip(g.unique, groups)]
__all__ = ['group_by']
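# Hedged sketch (added) of the weighted-mean path implemented in GroupBy.mean:
# weights are reduced alongside the values and used as the denominator.
import numpy as np
import numpy_indexed as npi
keys = np.array([0, 0, 1, 1])
values = np.array([1., 3., 10., 20.])
weights = np.array([1., 3., 1., 1.])
g = npi.group_by(keys)
print(g.mean(values))                   # unweighted: ([0, 1], [ 2., 15.])
print(g.mean(values, weights=weights))  # weighted:   ([0, 1], [ 2.5, 15.])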
|
mean
|
compute the mean over each group
Parameters
----------
values : array_like, [keys, ...]
values to take average of per group
axis : int, optional
alternative reduction axis for values
weights : ndarray, [keys, ...], optional
weight to use for each value
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
|
"""grouping module"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
import itertools
import numpy as np
from numpy_indexed.index import as_index
import numpy_indexed as npi
__author__ = "Eelco Hoogendoorn"
__license__ = "LGPL"
__email__ = "hoogendoorn.eelco@gmail.com"
class GroupBy(object):
"""
GroupBy class
contains an index of keys, and extends the index functionality with grouping-specific functionality
"""
def __init__(self, keys, axis=0):
"""
Parameters
----------
keys : indexable object
sequence of keys to group by
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
self.index = as_index(keys, axis)
#forward interesting/'public' index properties
@property
def unique(self):
"""unique keys"""
return self.index.unique
@property
def count(self):
"""count of each unique key"""
return self.index.count
@property
def inverse(self):
"""mapping such that unique[inverse]==keys"""
return self.index.inverse
@property
def groups(self):
"""int, number of groups formed by the keys"""
return self.index.groups
#some different methods of chopping up a set of values by key
def split_iterable_as_iterable(self, values):
"""Group iterable into iterables, in the order of the keys
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
Memory consumption depends on the amount of sorting required
Worst case, if index.sorter[-1] = 0, we need to consume the entire value iterable,
before we can start yielding any output
But to the extent that the keys are already sorted, the grouping is lazy
"""
values = iter(enumerate(values))
cache = dict()
def get_value(ti):
try:
return cache.pop(ti)
except KeyError:
while True:
i, v = next(values)
if i==ti:
return v
cache[i] = v
s = iter(self.index.sorter)
for c in self.count:
yield (get_value(i) for i in itertools.islice(s, int(c)))
def split_iterable_as_unordered_iterable(self, values):
"""Group iterable into iterables, without regard for the ordering of self.index.unique
key-group tuples are yielded as soon as they are complete
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
tuple of key, and a list of corresponding items in values
Notes
-----
This approach is lazy to the extent that grouped values appear close together in the input iterable
"""
from collections import defaultdict
cache = defaultdict(list)
count = self.count
unique = self.unique
key = (lambda i: unique[i]) if isinstance(unique, np.ndarray) else (lambda i: tuple(c[i] for c in unique))
for i,v in zip(self.inverse, values):
cache[i].append(v)
if len(cache[i]) == count[i]:
yield key(i), cache.pop(i)
def split_sequence_as_iterable(self, values):
"""Group sequence into iterables
Parameters
----------
values : sequence of length equal to keys
sequence of values to be grouped; must support random access
Yields
------
iterable of items in values
Notes
-----
This is the preferred method if values has random access, but we don't want it completely in memory.
Like a big memory mapped file, for instance
"""
s = iter(self.index.sorter)
for c in self.count:
yield (values[i] for i in itertools.islice(s, int(c)))
def split_array_as_array(self, values):
"""Group ndarray into ndarray by means of reshaping
Parameters
----------
values : ndarray_like, [index.size, ...]
Returns
-------
ndarray, [groups, group_size, ...]
values grouped by key
Raises
------
ValueError
This operation is only possible if index.uniform==True
"""
if not self.index.uniform:
raise ValueError("Array can only be split as array if all groups have the same size")
values = np.asarray(values)
values = values[self.index.sorter]
return values.reshape(self.groups, -1, *values.shape[1:])
def split_array_as_list(self, values):
"""Group values as a list of arrays, or a jagged-array
Parameters
----------
values : ndarray, [keys, ...]
Returns
-------
list of length self.groups of ndarray, [key_count, ...]
"""
values = np.asarray(values)
values = values[self.index.sorter]
return np.split(values, self.index.slices[1:-1], axis=0)
def split(self, values):
"""split values by key; returns an ndarray if all groups are uniform, else a list of arrays"""
try:
return self.split_array_as_array(values)
except ValueError:
# non-uniform groups cannot be reshaped; fall back to a jagged list of arrays
return self.split_array_as_list(values)
def __call__(self, values):
"""not sure how i feel about this. explicit is better than implict?"""
return self.unique, self.split(values)
# ufunc based reduction methods. should they return unique keys by default?
def reduce(self, values, operator=np.add, axis=0, dtype=None):
"""Reduce the values over identical key groups, using the given ufunc
reduction is over the first axis, which should have elements corresponding to the keys
all other axes are treated independently for the sake of this reduction
Parameters
----------
values : ndarray, [keys, ...]
values to perform reduction over
operator : numpy.ufunc
a numpy ufunc, such as np.add or np.multiply
axis : int, optional
the axis to reduce over
dtype : output dtype
Returns
-------
ndarray, [groups, ...]
values reduced by operator over the key-groups
"""
values = np.take(values, self.index.sorter, axis=axis)
return operator.reduceat(values, self.index.start, axis=axis, dtype=dtype)
def sum(self, values, axis=0, dtype=None):
"""compute the sum over each group
Parameters
----------
values : array_like, [keys, ...]
values to sum per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype)
def prod(self, values, axis=0, dtype=None):
"""compute the product over each group
Parameters
----------
values : array_like, [keys, ...]
values to multiply per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype, operator=np.multiply)
# MASKED: mean function (lines 258-288)
def var(self, values, axis=0, weights=None, dtype=None):
"""compute the variance over each group
Parameters
----------
values : array_like, [keys, ...]
values to take variance of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
unique, mean = self.mean(values, axis, weights, dtype)
err = values - mean.take(self.inverse, axis)
if weights is None:
shape = [1] * values.ndim
shape[axis] = self.groups
group_weights = self.count.reshape(shape)
var = self.reduce(err ** 2, axis=axis, dtype=dtype)
else:
weights = np.asarray(weights)
group_weights = self.reduce(weights, axis=axis, dtype=dtype)
var = self.reduce(weights * err ** 2, axis=axis, dtype=dtype)
return unique, var / group_weights
def std(self, values, axis=0, weights=None, dtype=None):
"""standard deviation over each group
Parameters
----------
values : array_like, [keys, ...]
values to take standard deviation of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
unique, var = self.var(values, axis, weights, dtype)
return unique, np.sqrt(var)
def median(self, values, axis=0, average=True):
"""compute the median value over each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the median of per group
axis : int, optional
alternative reduction axis for values
average : bool, optional
when average is true, the average of the two central values is taken for groups with an even key-count
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
mid_2 = self.index.start + self.index.stop
hi = mid_2 // 2
lo = (mid_2 - 1) // 2
#need this indirection for lex-index compatibility
sorted_group_rank_per_key = self.index.sorted_group_rank_per_key
def median1d(slc):
#place values at correct keys; preconditions the upcoming lexsort
slc = slc[self.index.sorter]
#refine value sorting within each keygroup
sorter = np.lexsort((slc, sorted_group_rank_per_key))
slc = slc[sorter]
return (slc[lo]+slc[hi]) / 2 if average else slc[hi]
values = np.asarray(values)
if values.ndim>1: #is trying to skip apply_along_axis somewhat premature optimization?
values = np.apply_along_axis(median1d, axis, values)
else:
values = median1d(values)
return self.unique, values
def mode(self, values, weights=None):
"""compute the mode within each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the mode of per group
weights : array_like, [keys], float, optional
optional weight associated with each entry in values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
if weights is None:
unique, weights = npi.count((self.index.sorted_group_rank_per_key, values))
else:
unique, weights = npi.group_by((self.index.sorted_group_rank_per_key, values)).sum(weights)
x, bin = npi.group_by(unique[0]).argmax(weights)
return x, unique[1][bin]
def min(self, values, axis=0):
"""return the minimum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take minimum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.minimum, axis)
def max(self, values, axis=0):
"""return the maximum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take maximum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.maximum, axis)
def first(self, values, axis=0):
"""return values at first occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the first value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.start], axis)
def last(self, values, axis=0):
"""return values at last occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the last value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.stop-1], axis)
def any(self, values, axis=0):
"""compute if any item evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
if values.dtype != bool:
values = values != 0
return self.unique, self.reduce(values, axis=axis) > 0
def all(self, values, axis=0):
"""compute if all items evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], bool
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, operator=np.multiply) != 0
def argmin(self, values):
"""return the index into values corresponding to the minimum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmin of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmin : ndarray, [groups]
index into value array, representing the argmin per group
"""
keys, minima = self.min(values)
minima = minima[self.inverse]
# select the first occurrence of the minimum in each group
index = as_index((self.inverse, values == minima))
return keys, index.sorter[index.start[-self.groups:]]
def argmax(self, values):
"""return the index into values corresponding to the maximum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmax of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmax : ndarray, [groups]
index into value array, representing the argmax per group
"""
keys, maxima = self.max(values)
maxima = maxima[self.inverse]
# select the first occurrence of the maximum in each group
index = as_index((self.inverse, values == maxima))
return keys, index.sorter[index.start[-self.groups:]]
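# Editorial note (assumed lexsort convention: the last key acts as the primary
# sort key): in the composite index over (inverse, values == maxima), False
# sorts before True, so the last `groups` segment starts belong to the
# per-group maxima, and the stable sorter maps each back to the first
# occurrence of the maximum in its group.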
#implement iter interface? could simply do zip( group_by(keys)(values)), no?
def group_by(keys, values=None, reduction=None, axis=0):
"""construct a grouping object on the given keys, optionally performing the given reduction on the given values
Parameters
----------
keys : indexable object
keys to group by
values : array_like, optional
sequence of values, of the same length as keys
if a reduction function is provided, the given values are reduced by key
if no reduction is provided, the given values are grouped and split by key
reduction : lambda, optional
reduction function to apply to the values in each group
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
Returns
-------
iterable
if values is None, a GroupBy object of the given keys object
if reduction is None, a tuple of a sequence of unique keys and a sequence of grouped values
else, a sequence of tuples of unique keys and reductions of values over that key-group
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
g = GroupBy(keys, axis)
if values is None:
return g
groups = g.split(values)
if reduction is None:
return g.unique, groups
return [(key, reduction(group)) for key, group in zip(g.unique, groups)]
__all__ = ['group_by']
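# --- Editorial usage sketch (assumed, not part of the original module); the
# keys and values below are illustrative ---
import numpy as np
keys = np.array([1, 1, 2, 2, 2])
values = np.array([10., 20., 30., 40., 50.])
unique, grouped = group_by(keys, values)
# unique -> array([1, 2]); grouped -> [array([10., 20.]), array([30., 40., 50.])]
per_group_sums = group_by(keys, values, np.sum)
# per_group_sums -> [(1, 30.0), (2, 120.0)]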
|
def mean(self, values, axis=0, weights=None, dtype=None):
"""compute the mean over each group
Parameters
----------
values : array_like, [keys, ...]
values to take average of per group
axis : int, optional
alternative reduction axis for values
weights : ndarray, [keys, ...], optional
weight to use for each value
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
if weights is None:
result = self.reduce(values, axis=axis, dtype=dtype)
shape = [1] * values.ndim
shape[axis] = self.groups
weights = self.count.reshape(shape)
else:
weights = np.asarray(weights)
result = self.reduce(values * weights, axis=axis, dtype=dtype)
weights = self.reduce(weights, axis=axis, dtype=dtype)
return self.unique, result / weights
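# Editorial note: with weights w_i and values x_i, each group mean is
# sum(w_i * x_i) / sum(w_i); the unweighted branch uses the same formula with
# all w_i = 1, so the denominator reduces to the per-group key count.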
| 258
| 288
|
"""grouping module"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
import itertools
import numpy as np
from numpy_indexed.index import as_index
import numpy_indexed as npi
__author__ = "Eelco Hoogendoorn"
__license__ = "LGPL"
__email__ = "hoogendoorn.eelco@gmail.com"
class GroupBy(object):
"""
GroupBy class
contains an index of keys, and extends the index functionality with grouping-specific functionality
"""
def __init__(self, keys, axis=0):
"""
Parameters
----------
keys : indexable object
sequence of keys to group by
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
self.index = as_index(keys, axis)
#forward interesting/'public' index properties
@property
def unique(self):
"""unique keys"""
return self.index.unique
@property
def count(self):
"""count of each unique key"""
return self.index.count
@property
def inverse(self):
"""mapping such that unique[inverse]==keys"""
return self.index.inverse
@property
def groups(self):
"""int, number of groups formed by the keys"""
return self.index.groups
#some different methods of chopping up a set of values by key
def split_iterable_as_iterable(self, values):
"""Group iterable into iterables, in the order of the keys
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
Memory consumption depends on the amount of sorting required
Worst case, if index.sorter[-1] = 0, we need to consume the entire value iterable,
before we can start yielding any output
But to the extent that the keys are already sorted, the grouping is lazy
"""
values = iter(enumerate(values))
cache = dict()
def get_value(ti):
try:
return cache.pop(ti)
except KeyError:
while True:
i, v = next(values)
if i==ti:
return v
cache[i] = v
s = iter(self.index.sorter)
for c in self.count:
yield (get_value(i) for i in itertools.islice(s, int(c)))
def split_iterable_as_unordered_iterable(self, values):
"""Group iterable into iterables, without regard for the ordering of self.index.unique
key-group tuples are yielded as soon as they are complete
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
tuple of key, and a list of corresponding items in values
Notes
-----
This approach is lazy, insofar as grouped values are close in their iterable
"""
from collections import defaultdict
cache = defaultdict(list)
count = self.count
unique = self.unique
key = (lambda i: unique[i]) if isinstance(unique, np.ndarray) else (lambda i: tuple(c[i] for c in unique))
for i,v in zip(self.inverse, values):
cache[i].append(v)
if len(cache[i]) == count[i]:
yield key(i), cache.pop(i)
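# Editorial example: with keys [0, 1, 0], the key-1 group completes at the
# second item and is yielded immediately, while key 0 must wait for the third
# item -- "unordered" refers to completion order, not key order.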
def split_sequence_as_iterable(self, values):
"""Group sequence into iterables
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
This is the preferred method if values has random access, but we don't want it completely in memory.
Like a big memory mapped file, for instance
"""
s = iter(self.index.sorter)
for c in self.count:
yield (values[i] for i in itertools.islice(s, int(c)))
def split_array_as_array(self, values):
"""Group ndarray into ndarray by means of reshaping
Parameters
----------
values : ndarray_like, [index.size, ...]
Returns
-------
ndarray, [groups, group_size, ...]
values grouped by key
Raises
------
ValueError
This operation is only possible if index.uniform==True
"""
if not self.index.uniform:
raise ValueError("Array can only be split as array if all groups have the same size")
values = np.asarray(values)
values = values[self.index.sorter]
return values.reshape(self.groups, -1, *values.shape[1:])
def split_array_as_list(self, values):
"""Group values as a list of arrays, or a jagged-array
Parameters
----------
values : ndarray, [keys, ...]
Returns
-------
list of length self.groups of ndarray, [key_count, ...]
"""
values = np.asarray(values)
values = values[self.index.sorter]
return np.split(values, self.index.slices[1:-1], axis=0)
def split(self, values):
"""some sensible defaults"""
try:
return self.split_array_as_array(values)
except ValueError:
# FIXME: change to iter in python 3?
return self.split_array_as_list(values)
def __call__(self, values):
"""not sure how i feel about this. explicit is better than implict?"""
return self.unique, self.split(values)
# ufunc based reduction methods. should they return unique keys by default?
def reduce(self, values, operator=np.add, axis=0, dtype=None):
"""Reduce the values over identical key groups, using the given ufunc
reduction is over the first axis, which should have elements corresponding to the keys
all other axes are treated independently for the sake of this reduction
Parameters
----------
values : ndarray, [keys, ...]
values to perform reduction over
operator : numpy.ufunc
a numpy ufunc, such as np.add or np.multiply
axis : int, optional
the axis to reduce over
dtype : output dtype
Returns
-------
ndarray, [groups, ...]
values reduced by operator over the key-groups
"""
values = np.take(values, self.index.sorter, axis=axis)
return operator.reduceat(values, self.index.start, axis=axis, dtype=dtype)
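# Editorial example: with sorted values [1, 2, 3, 4] and group starts [0, 2],
# np.add.reduceat returns [1+2, 3+4] == [3, 7]: one reduction per key-group.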
def sum(self, values, axis=0, dtype=None):
"""compute the sum over each group
Parameters
----------
values : array_like, [keys, ...]
values to sum per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype)
def prod(self, values, axis=0, dtype=None):
"""compute the product over each group
Parameters
----------
values : array_like, [keys, ...]
values to multiply per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype, operator=np.multiply)
def mean(self, values, axis=0, weights=None, dtype=None):
"""compute the mean over each group
Parameters
----------
values : array_like, [keys, ...]
values to take average of per group
axis : int, optional
alternative reduction axis for values
weights : ndarray, [keys, ...], optional
weight to use for each value
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
if weights is None:
result = self.reduce(values, axis=axis, dtype=dtype)
shape = [1] * values.ndim
shape[axis] = self.groups
weights = self.count.reshape(shape)
else:
weights = np.asarray(weights)
result = self.reduce(values * weights, axis=axis, dtype=dtype)
weights = self.reduce(weights, axis=axis, dtype=dtype)
return self.unique, result / weights
def var(self, values, axis=0, weights=None, dtype=None):
"""compute the variance over each group
Parameters
----------
values : array_like, [keys, ...]
values to take variance of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
unique, mean = self.mean(values, axis, weights, dtype)
err = values - mean.take(self.inverse, axis)
if weights is None:
shape = [1] * values.ndim
shape[axis] = self.groups
group_weights = self.count.reshape(shape)
var = self.reduce(err ** 2, axis=axis, dtype=dtype)
else:
weights = np.asarray(weights)
group_weights = self.reduce(weights, axis=axis, dtype=dtype)
var = self.reduce(weights * err ** 2, axis=axis, dtype=dtype)
return unique, var / group_weights
def std(self, values, axis=0, weights=None, dtype=None):
"""standard deviation over each group
Parameters
----------
values : array_like, [keys, ...]
values to take standard deviation of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
unique, var = self.var(values, axis, weights, dtype)
return unique, np.sqrt(var)
def median(self, values, axis=0, average=True):
"""compute the median value over each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the median of per group
axis : int, optional
alternative reduction axis for values
average : bool, optional
when average is true, the average of the two central values is taken for groups with an even key-count
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
mid_2 = self.index.start + self.index.stop
hi = mid_2 // 2
lo = (mid_2 - 1) // 2
#need this indirection for lex-index compatibility
sorted_group_rank_per_key = self.index.sorted_group_rank_per_key
def median1d(slc):
#place values at correct keys; preconditions the upcoming lexsort
slc = slc[self.index.sorter]
#refine value sorting within each keygroup
sorter = np.lexsort((slc, sorted_group_rank_per_key))
slc = slc[sorter]
return (slc[lo]+slc[hi]) / 2 if average else slc[hi]
values = np.asarray(values)
if values.ndim>1: #is trying to skip apply_along_axis somewhat premature optimization?
values = np.apply_along_axis(median1d, axis, values)
else:
values = median1d(values)
return self.unique, values
def mode(self, values, weights=None):
"""compute the mode within each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the mode of per group
weights : array_like, [keys], float, optional
optional weight associated with each entry in values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
if weights is None:
unique, weights = npi.count((self.index.sorted_group_rank_per_key, values))
else:
unique, weights = npi.group_by((self.index.sorted_group_rank_per_key, values)).sum(weights)
x, bin = npi.group_by(unique[0]).argmax(weights)
return x, unique[1][bin]
def min(self, values, axis=0):
"""return the minimum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take minimum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.minimum, axis)
def max(self, values, axis=0):
"""return the maximum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take maximum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.maximum, axis)
def first(self, values, axis=0):
"""return values at first occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the first value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.start], axis)
def last(self, values, axis=0):
"""return values at last occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the last value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.stop-1], axis)
def any(self, values, axis=0):
"""compute if any item evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], bool
value array, reduced over groups
"""
values = np.asarray(values)
if values.dtype != bool:
values = values != 0
return self.unique, self.reduce(values, axis=axis) > 0
def all(self, values, axis=0):
"""compute if all items evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], bool
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, operator=np.multiply) != 0
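# Editorial note: `all` reduces with np.multiply, so any zero in a group makes
# the product (and hence the result) False; with wide integer inputs the
# product can in principle overflow to zero, so booleanizing inputs first is
# the safer usage.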
def argmin(self, values):
"""return the index into values corresponding to the minimum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmin of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmin : ndarray, [groups]
index into value array, representing the argmin per group
"""
keys, minima = self.min(values)
minima = minima[self.inverse]
# select the first occurrence of the minimum in each group
index = as_index((self.inverse, values == minima))
return keys, index.sorter[index.start[-self.groups:]]
def argmax(self, values):
"""return the index into values corresponding to the maximum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmax of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmax : ndarray, [groups]
index into value array, representing the argmax per group
"""
keys, maxima = self.max(values)
maxima = maxima[self.inverse]
# select the first occurrence of the maximum in each group
index = as_index((self.inverse, values == maxima))
return keys, index.sorter[index.start[-self.groups:]]
#implement iter interface? could simply do zip( group_by(keys)(values)), no?
def group_by(keys, values=None, reduction=None, axis=0):
"""construct a grouping object on the given keys, optionally performing the given reduction on the given values
Parameters
----------
keys : indexable object
keys to group by
values : array_like, optional
sequence of values, of the same length as keys
if a reduction function is provided, the given values are reduced by key
if no reduction is provided, the given values are grouped and split by key
reduction : lambda, optional
reduction function to apply to the values in each group
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
Returns
-------
iterable
if values is None, a GroupBy object of the given keys object
if reduction is None, a tuple of a sequence of unique keys and a sequence of grouped values
else, a sequence of tuples of unique keys and reductions of values over that key-group
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
g = GroupBy(keys, axis)
if values is None:
return g
groups = g.split(values)
if reduction is None:
return g.unique, groups
return [(key, reduction(group)) for key, group in zip(g.unique, groups)]
__all__ = ['group_by']
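# --- Editorial sketch (assumed): uniform groups can reshape into a regular
# ndarray, while jagged groups fall back to a list of per-group arrays ---
import numpy as np
uniform = GroupBy(np.array([0, 0, 1, 1]))
arr = uniform.split_array_as_array(np.arange(4))
# arr -> array([[0, 1], [2, 3]])
jagged = GroupBy(np.array([0, 0, 0, 1]))
lst = jagged.split_array_as_list(np.arange(4))
# lst -> [array([0, 1, 2]), array([3])]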
|
var
|
compute the variance over each group
Parameters
----------
values : array_like, [keys, ...]
values to take variance of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
|
"""grouping module"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
import itertools
import numpy as np
from numpy_indexed.index import as_index
import numpy_indexed as npi
__author__ = "Eelco Hoogendoorn"
__license__ = "LGPL"
__email__ = "hoogendoorn.eelco@gmail.com"
class GroupBy(object):
"""
GroupBy class
contains an index of keys, and extends the index functionality with grouping-specific functionality
"""
def __init__(self, keys, axis=0):
"""
Parameters
----------
keys : indexable object
sequence of keys to group by
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
self.index = as_index(keys, axis)
#forward interesting/'public' index properties
@property
def unique(self):
"""unique keys"""
return self.index.unique
@property
def count(self):
"""count of each unique key"""
return self.index.count
@property
def inverse(self):
"""mapping such that unique[inverse]==keys"""
return self.index.inverse
@property
def groups(self):
"""int, number of groups formed by the keys"""
return self.index.groups
#some different methods of chopping up a set of values by key
def split_iterable_as_iterable(self, values):
"""Group iterable into iterables, in the order of the keys
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
Memory consumption depends on the amount of sorting required
Worst case, if index.sorter[-1] = 0, we need to consume the entire value iterable,
before we can start yielding any output
But to the extent that the keys are already sorted, the grouping is lazy
"""
values = iter(enumerate(values))
cache = dict()
def get_value(ti):
try:
return cache.pop(ti)
except KeyError:
while True:
i, v = next(values)
if i==ti:
return v
cache[i] = v
s = iter(self.index.sorter)
for c in self.count:
yield (get_value(i) for i in itertools.islice(s, int(c)))
def split_iterable_as_unordered_iterable(self, values):
"""Group iterable into iterables, without regard for the ordering of self.index.unique
key-group tuples are yielded as soon as they are complete
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
tuple of key, and a list of corresponding items in values
Notes
-----
This approach is lazy, insofar as grouped values are close in their iterable
"""
from collections import defaultdict
cache = defaultdict(list)
count = self.count
unique = self.unique
key = (lambda i: unique[i]) if isinstance(unique, np.ndarray) else (lambda i: tuple(c[i] for c in unique))
for i,v in zip(self.inverse, values):
cache[i].append(v)
if len(cache[i]) == count[i]:
yield key(i), cache.pop(i)
def split_sequence_as_iterable(self, values):
"""Group sequence into iterables
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
This is the preferred method if values has random access, but we don't want it completely in memory.
Like a big memory mapped file, for instance
"""
s = iter(self.index.sorter)
for c in self.count:
yield (values[i] for i in itertools.islice(s, int(c)))
def split_array_as_array(self, values):
"""Group ndarray into ndarray by means of reshaping
Parameters
----------
values : ndarray_like, [index.size, ...]
Returns
-------
ndarray, [groups, group_size, ...]
values grouped by key
Raises
------
ValueError
This operation is only possible if index.uniform==True
"""
if not self.index.uniform:
raise ValueError("Array can only be split as array if all groups have the same size")
values = np.asarray(values)
values = values[self.index.sorter]
return values.reshape(self.groups, -1, *values.shape[1:])
def split_array_as_list(self, values):
"""Group values as a list of arrays, or a jagged-array
Parameters
----------
values : ndarray, [keys, ...]
Returns
-------
list of length self.groups of ndarray, [key_count, ...]
"""
values = np.asarray(values)
values = values[self.index.sorter]
return np.split(values, self.index.slices[1:-1], axis=0)
def split(self, values):
"""some sensible defaults"""
try:
return self.split_array_as_array(values)
except ValueError:
# FIXME: change to iter in python 3?
return self.split_array_as_list(values)
def __call__(self, values):
"""not sure how i feel about this. explicit is better than implict?"""
return self.unique, self.split(values)
# ufunc based reduction methods. should they return unique keys by default?
def reduce(self, values, operator=np.add, axis=0, dtype=None):
"""Reduce the values over identical key groups, using the given ufunc
reduction is over the first axis, which should have elements corresponding to the keys
all other axes are treated independently for the sake of this reduction
Parameters
----------
values : ndarray, [keys, ...]
values to perform reduction over
operator : numpy.ufunc
a numpy ufunc, such as np.add or np.multiply
axis : int, optional
the axis to reduce over
dtype : output dtype
Returns
-------
ndarray, [groups, ...]
values reduced by operator over the key-groups
"""
values = np.take(values, self.index.sorter, axis=axis)
return operator.reduceat(values, self.index.start, axis=axis, dtype=dtype)
def sum(self, values, axis=0, dtype=None):
"""compute the sum over each group
Parameters
----------
values : array_like, [keys, ...]
values to sum per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype)
def prod(self, values, axis=0, dtype=None):
"""compute the product over each group
Parameters
----------
values : array_like, [keys, ...]
values to multiply per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype, operator=np.multiply)
def mean(self, values, axis=0, weights=None, dtype=None):
"""compute the mean over each group
Parameters
----------
values : array_like, [keys, ...]
values to take average of per group
axis : int, optional
alternative reduction axis for values
weights : ndarray, [keys, ...], optional
weight to use for each value
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
if weights is None:
result = self.reduce(values, axis=axis, dtype=dtype)
shape = [1] * values.ndim
shape[axis] = self.groups
weights = self.count.reshape(shape)
else:
weights = np.asarray(weights)
result = self.reduce(values * weights, axis=axis, dtype=dtype)
weights = self.reduce(weights, axis=axis, dtype=dtype)
return self.unique, result / weights
# MASKED: var function (lines 290-321)
def std(self, values, axis=0, weights=None, dtype=None):
"""standard deviation over each group
Parameters
----------
values : array_like, [keys, ...]
values to take standard deviation of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
unique, var = self.var(values, axis, weights, dtype)
return unique, np.sqrt(var)
def median(self, values, axis=0, average=True):
"""compute the median value over each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the median of per group
axis : int, optional
alternative reduction axis for values
average : bool, optional
when average is true, the average of the two central values is taken for groups with an even key-count
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
mid_2 = self.index.start + self.index.stop
hi = mid_2 // 2
lo = (mid_2 - 1) // 2
#need this indirection for lex-index compatibility
sorted_group_rank_per_key = self.index.sorted_group_rank_per_key
def median1d(slc):
#place values at correct keys; preconditions the upcoming lexsort
slc = slc[self.index.sorter]
#refine value sorting within each keygroup
sorter = np.lexsort((slc, sorted_group_rank_per_key))
slc = slc[sorter]
return (slc[lo]+slc[hi]) / 2 if average else slc[hi]
values = np.asarray(values)
if values.ndim>1: #is trying to skip apply_along_axis somewhat premature optimization?
values = np.apply_along_axis(median1d, axis, values)
else:
values = median1d(values)
return self.unique, values
def mode(self, values, weights=None):
"""compute the mode within each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the mode of per group
weights : array_like, [keys], float, optional
optional weight associated with each entry in values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
if weights is None:
unique, weights = npi.count((self.index.sorted_group_rank_per_key, values))
else:
unique, weights = npi.group_by((self.index.sorted_group_rank_per_key, values)).sum(weights)
x, bin = npi.group_by(unique[0]).argmax(weights)
return x, unique[1][bin]
def min(self, values, axis=0):
"""return the minimum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take minimum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.minimum, axis)
def max(self, values, axis=0):
"""return the maximum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take maximum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.maximum, axis)
def first(self, values, axis=0):
"""return values at first occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the first value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.start], axis)
def last(self, values, axis=0):
"""return values at last occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the last value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.stop-1], axis)
def any(self, values, axis=0):
"""compute if any item evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], bool
value array, reduced over groups
"""
values = np.asarray(values)
if values.dtype != bool:
values = values != 0
return self.unique, self.reduce(values, axis=axis) > 0
def all(self, values, axis=0):
"""compute if all items evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], bool
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, operator=np.multiply) != 0
def argmin(self, values):
"""return the index into values corresponding to the minimum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmin of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmin : ndarray, [groups]
index into value array, representing the argmin per group
"""
keys, minima = self.min(values)
minima = minima[self.inverse]
# select the first occurrence of the minimum in each group
index = as_index((self.inverse, values == minima))
return keys, index.sorter[index.start[-self.groups:]]
def argmax(self, values):
"""return the index into values corresponding to the maximum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmax of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmax : ndarray, [groups]
index into value array, representing the argmax per group
"""
keys, maxima = self.max(values)
maxima = maxima[self.inverse]
# select the first occurrence of the maximum in each group
index = as_index((self.inverse, values == maxima))
return keys, index.sorter[index.start[-self.groups:]]
#implement iter interface? could simply do zip( group_by(keys)(values)), no?
def group_by(keys, values=None, reduction=None, axis=0):
"""construct a grouping object on the given keys, optionally performing the given reduction on the given values
Parameters
----------
keys : indexable object
keys to group by
values : array_like, optional
sequence of values, of the same length as keys
if a reduction function is provided, the given values are reduced by key
if no reduction is provided, the given values are grouped and split by key
reduction : lambda, optional
reduction function to apply to the values in each group
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
Returns
-------
iterable
if values is None, a GroupBy object of the given keys object
if reduction is None, a tuple of a sequence of unique keys and a sequence of grouped values
else, a sequence of tuples of unique keys and reductions of values over that key-group
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
g = GroupBy(keys, axis)
if values is None:
return g
groups = g.split(values)
if reduction is None:
return g.unique, groups
return [(key, reduction(group)) for key, group in zip(g.unique, groups)]
__all__ = ['group_by']
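# --- Editorial sketch (assumed): argmin/argmax return indices into the
# original value array, resolving ties to the first occurrence ---
import numpy as np
g = GroupBy(np.array([0, 0, 1, 1]))
keys, idx = g.argmax(np.array([3., 7., 9., 9.]))
# idx -> array([1, 2]): the 7. in group 0, and the first of the tied 9.s in group 1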
|
def var(self, values, axis=0, weights=None, dtype=None):
"""compute the variance over each group
Parameters
----------
values : array_like, [keys, ...]
values to take variance of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
unique, mean = self.mean(values, axis, weights, dtype)
err = values - mean.take(self.inverse, axis)
if weights is None:
shape = [1] * values.ndim
shape[axis] = self.groups
group_weights = self.count.reshape(shape)
var = self.reduce(err ** 2, axis=axis, dtype=dtype)
else:
weights = np.asarray(weights)
group_weights = self.reduce(weights, axis=axis, dtype=dtype)
var = self.reduce(weights * err ** 2, axis=axis, dtype=dtype)
return unique, var / group_weights
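# Editorial note: this is the biased (population) weighted variance per group,
# var_g = sum(w_i * (x_i - mean_g)**2) / sum(w_i). E.g. a group with values
# [1., 3.] and unit weights has mean 2. and variance (1 + 1) / 2 = 1.;
# no Bessel-style correction is applied for small groups.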
| 290
| 321
|
"""grouping module"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
import itertools
import numpy as np
from numpy_indexed.index import as_index
import numpy_indexed as npi
__author__ = "Eelco Hoogendoorn"
__license__ = "LGPL"
__email__ = "hoogendoorn.eelco@gmail.com"
class GroupBy(object):
"""
GroupBy class
contains an index of keys, and extends the index functionality with grouping-specific functionality
"""
def __init__(self, keys, axis=0):
"""
Parameters
----------
keys : indexable object
sequence of keys to group by
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
self.index = as_index(keys, axis)
#forward interesting/'public' index properties
@property
def unique(self):
"""unique keys"""
return self.index.unique
@property
def count(self):
"""count of each unique key"""
return self.index.count
@property
def inverse(self):
"""mapping such that unique[inverse]==keys"""
return self.index.inverse
@property
def groups(self):
"""int, number of groups formed by the keys"""
return self.index.groups
#some different methods of chopping up a set of values by key
def split_iterable_as_iterable(self, values):
"""Group iterable into iterables, in the order of the keys
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
Memory consumption depends on the amount of sorting required
Worst case, if index.sorter[-1] = 0, we need to consume the entire value iterable,
before we can start yielding any output
But to the extent that the keys are already sorted, the grouping is lazy
"""
values = iter(enumerate(values))
cache = dict()
def get_value(ti):
try:
return cache.pop(ti)
except KeyError:
while True:
i, v = next(values)
if i==ti:
return v
cache[i] = v
s = iter(self.index.sorter)
for c in self.count:
yield (get_value(i) for i in itertools.islice(s, int(c)))
def split_iterable_as_unordered_iterable(self, values):
"""Group iterable into iterables, without regard for the ordering of self.index.unique
key-group tuples are yielded as soon as they are complete
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
tuple of key, and a list of corresponding items in values
Notes
-----
This approach is lazy, insofar as grouped values are close in their iterable
"""
from collections import defaultdict
cache = defaultdict(list)
count = self.count
unique = self.unique
key = (lambda i: unique[i]) if isinstance(unique, np.ndarray) else (lambda i: tuple(c[i] for c in unique))
for i,v in zip(self.inverse, values):
cache[i].append(v)
if len(cache[i]) == count[i]:
yield key(i), cache.pop(i)
def split_sequence_as_iterable(self, values):
"""Group sequence into iterables
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
This is the preferred method if values has random access, but we don't want it completely in memory.
Like a big memory mapped file, for instance
"""
s = iter(self.index.sorter)
for c in self.count:
yield (values[i] for i in itertools.islice(s, int(c)))
def split_array_as_array(self, values):
"""Group ndarray into ndarray by means of reshaping
Parameters
----------
values : ndarray_like, [index.size, ...]
Returns
-------
ndarray, [groups, group_size, ...]
values grouped by key
Raises
------
ValueError
This operation is only possible if index.uniform==True
"""
if not self.index.uniform:
raise ValueError("Array can only be split as array if all groups have the same size")
values = np.asarray(values)
values = values[self.index.sorter]
return values.reshape(self.groups, -1, *values.shape[1:])
def split_array_as_list(self, values):
"""Group values as a list of arrays, or a jagged-array
Parameters
----------
values : ndarray, [keys, ...]
Returns
-------
list of length self.groups of ndarray, [key_count, ...]
"""
values = np.asarray(values)
values = values[self.index.sorter]
return np.split(values, self.index.slices[1:-1], axis=0)
def split(self, values):
"""some sensible defaults"""
try:
return self.split_array_as_array(values)
except ValueError:
# FIXME: change to iter in python 3?
return self.split_array_as_list(values)
def __call__(self, values):
"""not sure how i feel about this. explicit is better than implict?"""
return self.unique, self.split(values)
# ufunc based reduction methods. should they return unique keys by default?
def reduce(self, values, operator=np.add, axis=0, dtype=None):
"""Reduce the values over identical key groups, using the given ufunc
reduction is over the first axis, which should have elements corresponding to the keys
all other axes are treated independently for the sake of this reduction
Parameters
----------
values : ndarray, [keys, ...]
values to perform reduction over
operator : numpy.ufunc
a numpy ufunc, such as np.add or np.multiply
axis : int, optional
the axis to reduce over
dtype : output dtype
Returns
-------
ndarray, [groups, ...]
values reduced by operator over the key-groups
"""
values = np.take(values, self.index.sorter, axis=axis)
return operator.reduceat(values, self.index.start, axis=axis, dtype=dtype)
def sum(self, values, axis=0, dtype=None):
"""compute the sum over each group
Parameters
----------
values : array_like, [keys, ...]
values to sum per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype)
def prod(self, values, axis=0, dtype=None):
"""compute the product over each group
Parameters
----------
values : array_like, [keys, ...]
values to multiply per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype, operator=np.multiply)
def mean(self, values, axis=0, weights=None, dtype=None):
"""compute the mean over each group
Parameters
----------
values : array_like, [keys, ...]
values to take average of per group
axis : int, optional
alternative reduction axis for values
weights : ndarray, [keys, ...], optional
weight to use for each value
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
if weights is None:
result = self.reduce(values, axis=axis, dtype=dtype)
shape = [1] * values.ndim
shape[axis] = self.groups
weights = self.count.reshape(shape)
else:
weights = np.asarray(weights)
result = self.reduce(values * weights, axis=axis, dtype=dtype)
weights = self.reduce(weights, axis=axis, dtype=dtype)
return self.unique, result / weights
def var(self, values, axis=0, weights=None, dtype=None):
"""compute the variance over each group
Parameters
----------
values : array_like, [keys, ...]
values to take variance of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
unique, mean = self.mean(values, axis, weights, dtype)
err = values - mean.take(self.inverse, axis)
if weights is None:
shape = [1] * values.ndim
shape[axis] = self.groups
group_weights = self.count.reshape(shape)
var = self.reduce(err ** 2, axis=axis, dtype=dtype)
else:
weights = np.asarray(weights)
group_weights = self.reduce(weights, axis=axis, dtype=dtype)
var = self.reduce(weights * err ** 2, axis=axis, dtype=dtype)
return unique, var / group_weights
def std(self, values, axis=0, weights=None, dtype=None):
"""standard deviation over each group
Parameters
----------
values : array_like, [keys, ...]
values to take standard deviation of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
unique, var = self.var(values, axis, weights, dtype)
return unique, np.sqrt(var)
def median(self, values, axis=0, average=True):
"""compute the median value over each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the median of per group
axis : int, optional
alternative reduction axis for values
average : bool, optional
when average is true, the average of the two central values is taken for groups with an even key-count
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
mid_2 = self.index.start + self.index.stop
hi = mid_2 // 2
lo = (mid_2 - 1) // 2
#need this indirection for lex-index compatibility
sorted_group_rank_per_key = self.index.sorted_group_rank_per_key
def median1d(slc):
#place values at correct keys; preconditions the upcoming lexsort
slc = slc[self.index.sorter]
#refine value sorting within each keygroup
sorter = np.lexsort((slc, sorted_group_rank_per_key))
slc = slc[sorter]
return (slc[lo]+slc[hi]) / 2 if average else slc[hi]
values = np.asarray(values)
if values.ndim>1: #is trying to skip apply_along_axis somewhat premature optimization?
values = np.apply_along_axis(median1d, axis, values)
else:
values = median1d(values)
return self.unique, values
def mode(self, values, weights=None):
"""compute the mode within each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the mode of per group
weights : array_like, [keys], float, optional
optional weight associated with each entry in values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
if weights is None:
unique, weights = npi.count((self.index.sorted_group_rank_per_key, values))
else:
unique, weights = npi.group_by((self.index.sorted_group_rank_per_key, values)).sum(weights)
x, bin = npi.group_by(unique[0]).argmax(weights)
return x, unique[1][bin]
def min(self, values, axis=0):
"""return the minimum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take minimum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.minimum, axis)
def max(self, values, axis=0):
"""return the maximum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take maximum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.maximum, axis)
def first(self, values, axis=0):
"""return values at first occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the first value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.start], axis)
def last(self, values, axis=0):
"""return values at last occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the last value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.stop-1], axis)
def any(self, values, axis=0):
"""compute if any item evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], bool
value array, reduced over groups
"""
values = np.asarray(values)
if values.dtype != bool:
values = values != 0
return self.unique, self.reduce(values, axis=axis) > 0
def all(self, values, axis=0):
"""compute if all items evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], bool
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, operator=np.multiply) != 0
def argmin(self, values):
"""return the index into values corresponding to the minimum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmin of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmin : ndarray, [groups]
index into value array, representing the argmin per group
"""
keys, minima = self.min(values)
minima = minima[self.inverse]
# select the first occurrence of the minimum in each group
index = as_index((self.inverse, values == minima))
return keys, index.sorter[index.start[-self.groups:]]
def argmax(self, values):
"""return the index into values corresponding to the maximum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmax of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmax : ndarray, [groups]
index into value array, representing the argmax per group
"""
keys, maxima = self.max(values)
maxima = maxima[self.inverse]
# select the first occurrence of the maximum in each group
index = as_index((self.inverse, values == maxima))
return keys, index.sorter[index.start[-self.groups:]]
#implement iter interface? could simply do zip( group_by(keys)(values)), no?
def group_by(keys, values=None, reduction=None, axis=0):
"""construct a grouping object on the given keys, optionally performing the given reduction on the given values
Parameters
----------
keys : indexable object
keys to group by
values : array_like, optional
sequence of values, of the same length as keys
if a reduction function is provided, the given values are reduced by key
if no reduction is provided, the given values are grouped and split by key
reduction : lambda, optional
reduction function to apply to the values in each group
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
Returns
-------
iterable
if values is None, a GroupBy object of the given keys object
if reduction is None, a tuple of a sequence of unique keys and a sequence of grouped values
else, a sequence of tuples of unique keys and reductions of values over that key-group
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
g = GroupBy(keys, axis)
if values is None:
return g
groups = g.split(values)
if reduction is None:
return g.unique, groups
return [(key, reduction(group)) for key, group in zip(g.unique, groups)]
__all__ = ['group_by']
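# --- Editorial sketch (assumed): median averages the two central values of an
# even-sized group; mode returns the most frequent value per group ---
import numpy as np
g = GroupBy(np.array([0, 0, 0, 0, 1, 1]))
unique, med = g.median(np.array([4., 1., 3., 2., 5., 9.]))
# group 0 sorted: [1, 2, 3, 4] -> 2.5; group 1: [5, 9] -> 7.0
unique, mode = g.mode(np.array([7, 7, 8, 9, 5, 5]))
# mode -> array([7, 5])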
|
median
|
compute the median value over each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the median of per group
axis : int, optional
alternative reduction axis for values
average : bool, optional
when average is true, the average of the two central values is taken for groups with an even key-count
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
|
"""grouping module"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
import itertools
import numpy as np
from numpy_indexed.index import as_index
import numpy_indexed as npi
__author__ = "Eelco Hoogendoorn"
__license__ = "LGPL"
__email__ = "hoogendoorn.eelco@gmail.com"
class GroupBy(object):
"""
GroupBy class
contains an index of keys, and extends the index functionality with grouping-specific functionality
"""
def __init__(self, keys, axis=0):
"""
Parameters
----------
keys : indexable object
sequence of keys to group by
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
self.index = as_index(keys, axis)
#forward interesting/'public' index properties
@property
def unique(self):
"""unique keys"""
return self.index.unique
@property
def count(self):
"""count of each unique key"""
return self.index.count
@property
def inverse(self):
"""mapping such that unique[inverse]==keys"""
return self.index.inverse
@property
def groups(self):
"""int, number of groups formed by the keys"""
return self.index.groups
#some different methods of chopping up a set of values by key
def split_iterable_as_iterable(self, values):
"""Group iterable into iterables, in the order of the keys
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
Memory consumption depends on the amount of sorting required
Worst case, if index.sorter[-1] = 0, we need to consume the entire value iterable,
before we can start yielding any output
But to the extent that the keys are already sorted, the grouping is lazy
"""
values = iter(enumerate(values))
cache = dict()
def get_value(ti):
try:
return cache.pop(ti)
except KeyError:
while True:
i, v = next(values)
if i==ti:
return v
cache[i] = v
s = iter(self.index.sorter)
for c in self.count:
yield (get_value(i) for i in itertools.islice(s, int(c)))
def split_iterable_as_unordered_iterable(self, values):
"""Group iterable into iterables, without regard for the ordering of self.index.unique
key-group tuples are yielded as soon as they are complete
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
tuple of key, and a list of corresponding items in values
Notes
-----
This approach is lazy, insofar as grouped values are close in their iterable
"""
from collections import defaultdict
cache = defaultdict(list)
count = self.count
unique = self.unique
key = (lambda i: unique[i]) if isinstance(unique, np.ndarray) else (lambda i: tuple(c[i] for c in unique))
for i,v in zip(self.inverse, values):
cache[i].append(v)
if len(cache[i]) == count[i]:
yield key(i), cache.pop(i)
def split_sequence_as_iterable(self, values):
"""Group sequence into iterables
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
This is the preferred method if values has random access, but we don't want it completely in memory.
Like a big memory mapped file, for instance
"""
s = iter(self.index.sorter)
for c in self.count:
yield (values[i] for i in itertools.islice(s, int(c)))
def split_array_as_array(self, values):
"""Group ndarray into ndarray by means of reshaping
Parameters
----------
values : ndarray_like, [index.size, ...]
Returns
-------
ndarray, [groups, group_size, ...]
values grouped by key
Raises
------
ValueError
This operation is only possible if index.uniform==True
"""
if not self.index.uniform:
raise ValueError("Array can only be split as array if all groups have the same size")
values = np.asarray(values)
values = values[self.index.sorter]
return values.reshape(self.groups, -1, *values.shape[1:])
def split_array_as_list(self, values):
"""Group values as a list of arrays, or a jagged-array
Parameters
----------
values : ndarray, [keys, ...]
Returns
-------
list of length self.groups of ndarray, [key_count, ...]
"""
values = np.asarray(values)
values = values[self.index.sorter]
return np.split(values, self.index.slices[1:-1], axis=0)
def split(self, values):
"""some sensible defaults"""
try:
return self.split_array_as_array(values)
except ValueError:
# FIXME: change to iter in python 3?
return self.split_array_as_list(values)
def __call__(self, values):
"""not sure how i feel about this. explicit is better than implict?"""
return self.unique, self.split(values)
# ufunc based reduction methods. should they return unique keys by default?
def reduce(self, values, operator=np.add, axis=0, dtype=None):
"""Reduce the values over identical key groups, using the given ufunc
reduction is over the first axis, which should have elements corresponding to the keys
all other axes are treated independently for the sake of this reduction
Parameters
----------
values : ndarray, [keys, ...]
values to perform reduction over
operator : numpy.ufunc
a numpy ufunc, such as np.add or np.multiply
axis : int, optional
the axis to reduce over
dtype : output dtype
Returns
-------
ndarray, [groups, ...]
values reduced by operator over the key-groups
"""
values = np.take(values, self.index.sorter, axis=axis)
return operator.reduceat(values, self.index.start, axis=axis, dtype=dtype)
def sum(self, values, axis=0, dtype=None):
"""compute the sum over each group
Parameters
----------
values : array_like, [keys, ...]
values to sum per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype)
def prod(self, values, axis=0, dtype=None):
"""compute the product over each group
Parameters
----------
values : array_like, [keys, ...]
values to multiply per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype, operator=np.multiply)
def mean(self, values, axis=0, weights=None, dtype=None):
"""compute the mean over each group
Parameters
----------
values : array_like, [keys, ...]
values to take average of per group
axis : int, optional
alternative reduction axis for values
weights : ndarray, [keys, ...], optional
weight to use for each value
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
if weights is None:
result = self.reduce(values, axis=axis, dtype=dtype)
shape = [1] * values.ndim
shape[axis] = self.groups
weights = self.count.reshape(shape)
else:
weights = np.asarray(weights)
result = self.reduce(values * weights, axis=axis, dtype=dtype)
weights = self.reduce(weights, axis=axis, dtype=dtype)
return self.unique, result / weights
def var(self, values, axis=0, weights=None, dtype=None):
"""compute the variance over each group
Parameters
----------
values : array_like, [keys, ...]
values to take variance of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
unique, mean = self.mean(values, axis, weights, dtype)
err = values - mean.take(self.inverse, axis)
if weights is None:
shape = [1] * values.ndim
shape[axis] = self.groups
group_weights = self.count.reshape(shape)
var = self.reduce(err ** 2, axis=axis, dtype=dtype)
else:
weights = np.asarray(weights)
group_weights = self.reduce(weights, axis=axis, dtype=dtype)
var = self.reduce(weights * err ** 2, axis=axis, dtype=dtype)
return unique, var / group_weights
def std(self, values, axis=0, weights=None, dtype=None):
"""standard deviation over each group
Parameters
----------
values : array_like, [keys, ...]
values to take standard deviation of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
unique, var = self.var(values, axis, weights, dtype)
return unique, np.sqrt(var)
# MASKED: median function (lines 343-382)
def mode(self, values, weights=None):
"""compute the mode within each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the mode of per group
weights : array_like, [keys], float, optional
optional weight associated with each entry in values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
if weights is None:
unique, weights = npi.count((self.index.sorted_group_rank_per_key, values))
else:
unique, weights = npi.group_by((self.index.sorted_group_rank_per_key, values)).sum(weights)
x, bin = npi.group_by(unique[0]).argmax(weights)
return x, unique[1][bin]
def min(self, values, axis=0):
"""return the minimum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take minimum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.minimum, axis)
def max(self, values, axis=0):
"""return the maximum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take maximum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.maximum, axis)
def first(self, values, axis=0):
"""return values at first occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the first value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.start], axis)
def last(self, values, axis=0):
"""return values at last occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the last value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.stop-1], axis)
def any(self, values, axis=0):
"""compute if any item evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
        if values.dtype != bool:
values = values != 0
return self.unique, self.reduce(values, axis=axis) > 0
def all(self, values, axis=0):
"""compute if all items evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, operator=np.multiply) != 0
def argmin(self, values):
"""return the index into values corresponding to the minimum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmin of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmin : ndarray, [groups]
index into value array, representing the argmin per group
"""
keys, minima = self.min(values)
minima = minima[self.inverse]
        # select the first occurrence of the minimum in each group
index = as_index((self.inverse, values == minima))
return keys, index.sorter[index.start[-self.groups:]]
def argmax(self, values):
"""return the index into values corresponding to the maximum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmax of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmax : ndarray, [groups]
index into value array, representing the argmax per group
"""
keys, maxima = self.max(values)
maxima = maxima[self.inverse]
        # select the first occurrence of the maximum in each group
index = as_index((self.inverse, values == maxima))
return keys, index.sorter[index.start[-self.groups:]]
#implement iter interface? could simply do zip( group_by(keys)(values)), no?
def group_by(keys, values=None, reduction=None, axis=0):
"""construct a grouping object on the given keys, optionally performing the given reduction on the given values
Parameters
----------
keys : indexable object
keys to group by
values : array_like, optional
sequence of values, of the same length as keys
if a reduction function is provided, the given values are reduced by key
if no reduction is provided, the given values are grouped and split by key
reduction : lambda, optional
reduction function to apply to the values in each group
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
Returns
-------
iterable
if values is None, a GroupBy object of the given keys object
        if reduction is None, a tuple of a sequence of unique keys and a sequence of grouped values
else, a sequence of tuples of unique keys and reductions of values over that key-group
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
g = GroupBy(keys, axis)
if values is None:
return g
groups = g.split(values)
if reduction is None:
return g.unique, groups
return [(key, reduction(group)) for key, group in zip(g.unique, groups)]
__all__ = ['group_by']
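# Illustrative usage (editor's sketch, not part of the original module): the
# three calling conventions documented on group_by above, exercised on toy
# key/value data. The printed results follow directly from the docstrings.
if __name__ == '__main__':
    demo_keys = np.array(['a', 'b', 'a', 'b', 'a'])
    demo_vals = np.array([1, 2, 3, 4, 5])
    g = group_by(demo_keys)                     # no values: a bare GroupBy object
    print(g.sum(demo_vals))                     # (['a' 'b'], [9 6])
    print(group_by(demo_keys, demo_vals))       # unique keys plus split value groups
    print(group_by(demo_keys, demo_vals, sum))  # [(key, reduction(group)), ...]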
|
def median(self, values, axis=0, average=True):
"""compute the median value over each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the median of per group
axis : int, optional
alternative reduction axis for values
average : bool, optional
when average is true, the average of the two central values is taken for groups with an even key-count
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
mid_2 = self.index.start + self.index.stop
        hi = mid_2 // 2
        lo = (mid_2 - 1) // 2
#need this indirection for lex-index compatibility
sorted_group_rank_per_key = self.index.sorted_group_rank_per_key
def median1d(slc):
#place values at correct keys; preconditions the upcoming lexsort
slc = slc[self.index.sorter]
#refine value sorting within each keygroup
sorter = np.lexsort((slc, sorted_group_rank_per_key))
slc = slc[sorter]
return (slc[lo]+slc[hi]) / 2 if average else slc[hi]
values = np.asarray(values)
if values.ndim>1: #is trying to skip apply_along_axis somewhat premature optimization?
values = np.apply_along_axis(median1d, axis, values)
else:
values = median1d(values)
return self.unique, values
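# Editor's note (illustrative, not part of the extracted implementation): for a
# group occupying sorted positions start..stop, mid_2 = start + stop, so
# hi = mid_2 // 2 and lo = (mid_2 - 1) // 2 bracket the group's midpoint.
# E.g. start=0, stop=4 (even count): lo=1, hi=2, averaging slc[1] and slc[2];
# start=0, stop=5 (odd count): lo=hi=2, selecting the single middle element.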
| 343
| 382
|
"""grouping module"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
import itertools
import numpy as np
from numpy_indexed.index import as_index
import numpy_indexed as npi
__author__ = "Eelco Hoogendoorn"
__license__ = "LGPL"
__email__ = "hoogendoorn.eelco@gmail.com"
class GroupBy(object):
"""
GroupBy class
contains an index of keys, and extends the index functionality with grouping-specific functionality
"""
def __init__(self, keys, axis=0):
"""
Parameters
----------
keys : indexable object
sequence of keys to group by
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
self.index = as_index(keys, axis)
#forward interesting/'public' index properties
@property
def unique(self):
"""unique keys"""
return self.index.unique
@property
def count(self):
"""count of each unique key"""
return self.index.count
@property
def inverse(self):
"""mapping such that unique[inverse]==keys"""
return self.index.inverse
@property
def groups(self):
"""int, number of groups formed by the keys"""
return self.index.groups
#some different methods of chopping up a set of values by key
def split_iterable_as_iterable(self, values):
"""Group iterable into iterables, in the order of the keys
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
Memory consumption depends on the amount of sorting required
Worst case, if index.sorter[-1] = 0, we need to consume the entire value iterable,
before we can start yielding any output
But to the extent that the keys are already sorted, the grouping is lazy
"""
values = iter(enumerate(values))
cache = dict()
def get_value(ti):
try:
return cache.pop(ti)
            except KeyError:
while True:
i, v = next(values)
if i==ti:
return v
cache[i] = v
s = iter(self.index.sorter)
for c in self.count:
yield (get_value(i) for i in itertools.islice(s, int(c)))
def split_iterable_as_unordered_iterable(self, values):
"""Group iterable into iterables, without regard for the ordering of self.index.unique
key-group tuples are yielded as soon as they are complete
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
tuple of key, and a list of corresponding items in values
Notes
-----
        This approach is lazy to the extent that members of the same group occur close together in the input iterable
"""
from collections import defaultdict
cache = defaultdict(list)
count = self.count
unique = self.unique
key = (lambda i: unique[i]) if isinstance(unique, np.ndarray) else (lambda i: tuple(c[i] for c in unique))
for i,v in zip(self.inverse, values):
cache[i].append(v)
if len(cache[i]) == count[i]:
yield key(i), cache.pop(i)
def split_sequence_as_iterable(self, values):
"""Group sequence into iterables
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
        This is the preferred method if values has random access, but we don't want it completely in memory.
        Like a big memory-mapped file, for instance
"""
        # note: relies on values supporting random-access indexing
s = iter(self.index.sorter)
for c in self.count:
yield (values[i] for i in itertools.islice(s, int(c)))
def split_array_as_array(self, values):
"""Group ndarray into ndarray by means of reshaping
Parameters
----------
values : ndarray_like, [index.size, ...]
Returns
-------
ndarray, [groups, group_size, ...]
values grouped by key
Raises
------
        ValueError
            This operation is only possible if index.uniform==True
"""
if not self.index.uniform:
raise ValueError("Array can only be split as array if all groups have the same size")
values = np.asarray(values)
values = values[self.index.sorter]
return values.reshape(self.groups, -1, *values.shape[1:])
def split_array_as_list(self, values):
"""Group values as a list of arrays, or a jagged-array
Parameters
----------
values : ndarray, [keys, ...]
Returns
-------
list of length self.groups of ndarray, [key_count, ...]
"""
values = np.asarray(values)
values = values[self.index.sorter]
return np.split(values, self.index.slices[1:-1], axis=0)
def split(self, values):
"""some sensible defaults"""
try:
return self.split_array_as_array(values)
        except ValueError:
# FIXME: change to iter in python 3?
return self.split_array_as_list(values)
def __call__(self, values):
"""not sure how i feel about this. explicit is better than implict?"""
return self.unique, self.split(values)
# ufunc based reduction methods. should they return unique keys by default?
def reduce(self, values, operator=np.add, axis=0, dtype=None):
"""Reduce the values over identical key groups, using the given ufunc
reduction is over the first axis, which should have elements corresponding to the keys
        all other axes are treated independently for the sake of this reduction
Parameters
----------
values : ndarray, [keys, ...]
values to perform reduction over
operator : numpy.ufunc
            a numpy ufunc, such as np.add or np.multiply
axis : int, optional
the axis to reduce over
dtype : output dtype
Returns
-------
ndarray, [groups, ...]
values reduced by operator over the key-groups
"""
values = np.take(values, self.index.sorter, axis=axis)
return operator.reduceat(values, self.index.start, axis=axis, dtype=dtype)
def sum(self, values, axis=0, dtype=None):
"""compute the sum over each group
Parameters
----------
values : array_like, [keys, ...]
values to sum per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype)
def prod(self, values, axis=0, dtype=None):
"""compute the product over each group
Parameters
----------
values : array_like, [keys, ...]
values to multiply per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype, operator=np.multiply)
def mean(self, values, axis=0, weights=None, dtype=None):
"""compute the mean over each group
Parameters
----------
values : array_like, [keys, ...]
values to take average of per group
axis : int, optional
alternative reduction axis for values
weights : ndarray, [keys, ...], optional
weight to use for each value
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
if weights is None:
result = self.reduce(values, axis=axis, dtype=dtype)
shape = [1] * values.ndim
shape[axis] = self.groups
weights = self.count.reshape(shape)
else:
weights = np.asarray(weights)
result = self.reduce(values * weights, axis=axis, dtype=dtype)
weights = self.reduce(weights, axis=axis, dtype=dtype)
return self.unique, result / weights
def var(self, values, axis=0, weights=None, dtype=None):
"""compute the variance over each group
Parameters
----------
values : array_like, [keys, ...]
values to take variance of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
unique, mean = self.mean(values, axis, weights, dtype)
err = values - mean.take(self.inverse, axis)
if weights is None:
shape = [1] * values.ndim
shape[axis] = self.groups
group_weights = self.count.reshape(shape)
var = self.reduce(err ** 2, axis=axis, dtype=dtype)
else:
weights = np.asarray(weights)
group_weights = self.reduce(weights, axis=axis, dtype=dtype)
var = self.reduce(weights * err ** 2, axis=axis, dtype=dtype)
return unique, var / group_weights
def std(self, values, axis=0, weights=None, dtype=None):
"""standard deviation over each group
Parameters
----------
values : array_like, [keys, ...]
values to take standard deviation of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
unique, var = self.var(values, axis, weights, dtype)
return unique, np.sqrt(var)
def median(self, values, axis=0, average=True):
"""compute the median value over each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the median of per group
axis : int, optional
alternative reduction axis for values
average : bool, optional
when average is true, the average of the two central values is taken for groups with an even key-count
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
mid_2 = self.index.start + self.index.stop
        hi = mid_2 // 2
        lo = (mid_2 - 1) // 2
#need this indirection for lex-index compatibility
sorted_group_rank_per_key = self.index.sorted_group_rank_per_key
def median1d(slc):
#place values at correct keys; preconditions the upcoming lexsort
slc = slc[self.index.sorter]
#refine value sorting within each keygroup
sorter = np.lexsort((slc, sorted_group_rank_per_key))
slc = slc[sorter]
return (slc[lo]+slc[hi]) / 2 if average else slc[hi]
values = np.asarray(values)
if values.ndim>1: #is trying to skip apply_along_axis somewhat premature optimization?
values = np.apply_along_axis(median1d, axis, values)
else:
values = median1d(values)
return self.unique, values
def mode(self, values, weights=None):
"""compute the mode within each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the mode of per group
weights : array_like, [keys], float, optional
optional weight associated with each entry in values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
if weights is None:
unique, weights = npi.count((self.index.sorted_group_rank_per_key, values))
else:
unique, weights = npi.group_by((self.index.sorted_group_rank_per_key, values)).sum(weights)
x, bin = npi.group_by(unique[0]).argmax(weights)
return x, unique[1][bin]
def min(self, values, axis=0):
"""return the minimum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take minimum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.minimum, axis)
def max(self, values, axis=0):
"""return the maximum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take maximum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.maximum, axis)
def first(self, values, axis=0):
"""return values at first occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the first value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.start], axis)
def last(self, values, axis=0):
"""return values at last occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the last value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.stop-1], axis)
def any(self, values, axis=0):
"""compute if any item evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
        if values.dtype != bool:
values = values != 0
return self.unique, self.reduce(values, axis=axis) > 0
def all(self, values, axis=0):
"""compute if all items evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, operator=np.multiply) != 0
def argmin(self, values):
"""return the index into values corresponding to the minimum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmin of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmin : ndarray, [groups]
index into value array, representing the argmin per group
"""
keys, minima = self.min(values)
minima = minima[self.inverse]
        # select the first occurrence of the minimum in each group
index = as_index((self.inverse, values == minima))
return keys, index.sorter[index.start[-self.groups:]]
def argmax(self, values):
"""return the index into values corresponding to the maximum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmax of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmax : ndarray, [groups]
index into value array, representing the argmax per group
"""
keys, maxima = self.max(values)
maxima = maxima[self.inverse]
        # select the first occurrence of the maximum in each group
index = as_index((self.inverse, values == maxima))
return keys, index.sorter[index.start[-self.groups:]]
#implement iter interface? could simply do zip( group_by(keys)(values)), no?
def group_by(keys, values=None, reduction=None, axis=0):
"""construct a grouping object on the given keys, optionally performing the given reduction on the given values
Parameters
----------
keys : indexable object
keys to group by
values : array_like, optional
sequence of values, of the same length as keys
if a reduction function is provided, the given values are reduced by key
if no reduction is provided, the given values are grouped and split by key
reduction : lambda, optional
reduction function to apply to the values in each group
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
Returns
-------
iterable
if values is None, a GroupBy object of the given keys object
if reduction is None, an tuple of a sequence of unique keys and a sequence of grouped values
else, a sequence of tuples of unique keys and reductions of values over that key-group
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
g = GroupBy(keys, axis)
if values is None:
return g
groups = g.split(values)
if reduction is None:
return g.unique, groups
return [(key, reduction(group)) for key, group in zip(g.unique, groups)]
__all__ = ['group_by']
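# Illustrative sketch (editor's addition): GroupBy.reduce accepts any numpy
# ufunc, so e.g. a group-wise bitwise-or falls out of the same reduceat
# machinery that powers sum and prod above.
if __name__ == '__main__':
    flags = np.array([0b001, 0b010, 0b010, 0b100])
    labels = np.array([0, 0, 1, 1])
    g = GroupBy(labels)
    print(g.unique, g.reduce(flags, operator=np.bitwise_or))  # [0 1] [3 6]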
|
mode
|
compute the mode within each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the mode of per group
weights : array_like, [keys], float, optional
optional weight associated with each entry in values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
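Example (illustrative, added by the editor)
-------
>>> import numpy as np
>>> import numpy_indexed as npi
>>> npi.group_by(np.array([0, 0, 0, 1, 1])).mode(np.array([7, 7, 8, 9, 9]))
(array([0, 1]), array([7, 9]))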
|
"""grouping module"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
import itertools
import numpy as np
from numpy_indexed.index import as_index
import numpy_indexed as npi
__author__ = "Eelco Hoogendoorn"
__license__ = "LGPL"
__email__ = "hoogendoorn.eelco@gmail.com"
class GroupBy(object):
"""
GroupBy class
contains an index of keys, and extends the index functionality with grouping-specific functionality
"""
def __init__(self, keys, axis=0):
"""
Parameters
----------
keys : indexable object
sequence of keys to group by
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
self.index = as_index(keys, axis)
#forward interesting/'public' index properties
@property
def unique(self):
"""unique keys"""
return self.index.unique
@property
def count(self):
"""count of each unique key"""
return self.index.count
@property
def inverse(self):
"""mapping such that unique[inverse]==keys"""
return self.index.inverse
@property
def groups(self):
"""int, number of groups formed by the keys"""
return self.index.groups
#some different methods of chopping up a set of values by key
def split_iterable_as_iterable(self, values):
"""Group iterable into iterables, in the order of the keys
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
Memory consumption depends on the amount of sorting required
Worst case, if index.sorter[-1] = 0, we need to consume the entire value iterable,
before we can start yielding any output
But to the extent that the keys are already sorted, the grouping is lazy
"""
values = iter(enumerate(values))
cache = dict()
def get_value(ti):
try:
return cache.pop(ti)
            except KeyError:
while True:
i, v = next(values)
if i==ti:
return v
cache[i] = v
s = iter(self.index.sorter)
for c in self.count:
yield (get_value(i) for i in itertools.islice(s, int(c)))
def split_iterable_as_unordered_iterable(self, values):
"""Group iterable into iterables, without regard for the ordering of self.index.unique
key-group tuples are yielded as soon as they are complete
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
tuple of key, and a list of corresponding items in values
Notes
-----
        This approach is lazy to the extent that members of the same group occur close together in the input iterable
"""
from collections import defaultdict
cache = defaultdict(list)
count = self.count
unique = self.unique
key = (lambda i: unique[i]) if isinstance(unique, np.ndarray) else (lambda i: tuple(c[i] for c in unique))
for i,v in zip(self.inverse, values):
cache[i].append(v)
if len(cache[i]) == count[i]:
yield key(i), cache.pop(i)
def split_sequence_as_iterable(self, values):
"""Group sequence into iterables
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
        This is the preferred method if values has random access, but we don't want it completely in memory.
        Like a big memory-mapped file, for instance
"""
        # note: relies on values supporting random-access indexing
s = iter(self.index.sorter)
for c in self.count:
yield (values[i] for i in itertools.islice(s, int(c)))
def split_array_as_array(self, values):
"""Group ndarray into ndarray by means of reshaping
Parameters
----------
values : ndarray_like, [index.size, ...]
Returns
-------
ndarray, [groups, group_size, ...]
values grouped by key
Raises
------
        ValueError
            This operation is only possible if index.uniform==True
"""
if not self.index.uniform:
raise ValueError("Array can only be split as array if all groups have the same size")
values = np.asarray(values)
values = values[self.index.sorter]
return values.reshape(self.groups, -1, *values.shape[1:])
def split_array_as_list(self, values):
"""Group values as a list of arrays, or a jagged-array
Parameters
----------
values : ndarray, [keys, ...]
Returns
-------
list of length self.groups of ndarray, [key_count, ...]
"""
values = np.asarray(values)
values = values[self.index.sorter]
return np.split(values, self.index.slices[1:-1], axis=0)
def split(self, values):
"""some sensible defaults"""
try:
return self.split_array_as_array(values)
        except ValueError:
# FIXME: change to iter in python 3?
return self.split_array_as_list(values)
def __call__(self, values):
"""not sure how i feel about this. explicit is better than implict?"""
return self.unique, self.split(values)
# ufunc based reduction methods. should they return unique keys by default?
def reduce(self, values, operator=np.add, axis=0, dtype=None):
"""Reduce the values over identical key groups, using the given ufunc
reduction is over the first axis, which should have elements corresponding to the keys
        all other axes are treated independently for the sake of this reduction
Parameters
----------
values : ndarray, [keys, ...]
values to perform reduction over
operator : numpy.ufunc
            a numpy ufunc, such as np.add or np.multiply
axis : int, optional
the axis to reduce over
dtype : output dtype
Returns
-------
ndarray, [groups, ...]
values reduced by operator over the key-groups
"""
values = np.take(values, self.index.sorter, axis=axis)
return operator.reduceat(values, self.index.start, axis=axis, dtype=dtype)
def sum(self, values, axis=0, dtype=None):
"""compute the sum over each group
Parameters
----------
values : array_like, [keys, ...]
values to sum per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype)
def prod(self, values, axis=0, dtype=None):
"""compute the product over each group
Parameters
----------
values : array_like, [keys, ...]
values to multiply per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype, operator=np.multiply)
def mean(self, values, axis=0, weights=None, dtype=None):
"""compute the mean over each group
Parameters
----------
values : array_like, [keys, ...]
values to take average of per group
axis : int, optional
alternative reduction axis for values
weights : ndarray, [keys, ...], optional
weight to use for each value
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
if weights is None:
result = self.reduce(values, axis=axis, dtype=dtype)
shape = [1] * values.ndim
shape[axis] = self.groups
weights = self.count.reshape(shape)
else:
weights = np.asarray(weights)
result = self.reduce(values * weights, axis=axis, dtype=dtype)
weights = self.reduce(weights, axis=axis, dtype=dtype)
return self.unique, result / weights
def var(self, values, axis=0, weights=None, dtype=None):
"""compute the variance over each group
Parameters
----------
values : array_like, [keys, ...]
values to take variance of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
unique, mean = self.mean(values, axis, weights, dtype)
err = values - mean.take(self.inverse, axis)
if weights is None:
shape = [1] * values.ndim
shape[axis] = self.groups
group_weights = self.count.reshape(shape)
var = self.reduce(err ** 2, axis=axis, dtype=dtype)
else:
weights = np.asarray(weights)
group_weights = self.reduce(weights, axis=axis, dtype=dtype)
var = self.reduce(weights * err ** 2, axis=axis, dtype=dtype)
return unique, var / group_weights
def std(self, values, axis=0, weights=None, dtype=None):
"""standard deviation over each group
Parameters
----------
values : array_like, [keys, ...]
values to take standard deviation of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
unique, var = self.var(values, axis, weights, dtype)
return unique, np.sqrt(var)
def median(self, values, axis=0, average=True):
"""compute the median value over each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the median of per group
axis : int, optional
alternative reduction axis for values
average : bool, optional
when average is true, the average of the two central values is taken for groups with an even key-count
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
mid_2 = self.index.start + self.index.stop
        hi = mid_2 // 2
        lo = (mid_2 - 1) // 2
#need this indirection for lex-index compatibility
sorted_group_rank_per_key = self.index.sorted_group_rank_per_key
def median1d(slc):
#place values at correct keys; preconditions the upcoming lexsort
slc = slc[self.index.sorter]
#refine value sorting within each keygroup
sorter = np.lexsort((slc, sorted_group_rank_per_key))
slc = slc[sorter]
return (slc[lo]+slc[hi]) / 2 if average else slc[hi]
values = np.asarray(values)
if values.ndim>1: #is trying to skip apply_along_axis somewhat premature optimization?
values = np.apply_along_axis(median1d, axis, values)
else:
values = median1d(values)
return self.unique, values
# MASKED: mode function (lines 384-407)
def min(self, values, axis=0):
"""return the minimum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take minimum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.minimum, axis)
def max(self, values, axis=0):
"""return the maximum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take maximum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.maximum, axis)
def first(self, values, axis=0):
"""return values at first occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the first value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.start], axis)
def last(self, values, axis=0):
"""return values at last occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the last value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.stop-1], axis)
def any(self, values, axis=0):
"""compute if any item evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
        if values.dtype != bool:
values = values != 0
return self.unique, self.reduce(values, axis=axis) > 0
def all(self, values, axis=0):
"""compute if all items evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, operator=np.multiply) != 0
def argmin(self, values):
"""return the index into values corresponding to the minimum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmin of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmin : ndarray, [groups]
index into value array, representing the argmin per group
"""
keys, minima = self.min(values)
minima = minima[self.inverse]
        # select the first occurrence of the minimum in each group
index = as_index((self.inverse, values == minima))
return keys, index.sorter[index.start[-self.groups:]]
def argmax(self, values):
"""return the index into values corresponding to the maximum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmax of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmax : ndarray, [groups]
index into value array, representing the argmax per group
"""
keys, maxima = self.max(values)
maxima = maxima[self.inverse]
        # select the first occurrence of the maximum in each group
index = as_index((self.inverse, values == maxima))
return keys, index.sorter[index.start[-self.groups:]]
#implement iter interface? could simply do zip( group_by(keys)(values)), no?
def group_by(keys, values=None, reduction=None, axis=0):
"""construct a grouping object on the given keys, optionally performing the given reduction on the given values
Parameters
----------
keys : indexable object
keys to group by
values : array_like, optional
sequence of values, of the same length as keys
if a reduction function is provided, the given values are reduced by key
if no reduction is provided, the given values are grouped and split by key
reduction : lambda, optional
reduction function to apply to the values in each group
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
Returns
-------
iterable
if values is None, a GroupBy object of the given keys object
        if reduction is None, a tuple of a sequence of unique keys and a sequence of grouped values
else, a sequence of tuples of unique keys and reductions of values over that key-group
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
g = GroupBy(keys, axis)
if values is None:
return g
groups = g.split(values)
if reduction is None:
return g.unique, groups
return [(key, reduction(group)) for key, group in zip(g.unique, groups)]
__all__ = ['group_by']
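# Illustrative sketch (editor's addition): the weighted statistics reuse the
# same reduceat path; when weights are omitted, the per-group counts serve as
# the denominator, as in the mean/var/std methods above.
if __name__ == '__main__':
    k = np.array([0, 0, 1, 1])
    v = np.array([1.0, 3.0, 10.0, 30.0])
    w = np.array([1.0, 1.0, 3.0, 1.0])
    g = GroupBy(k)
    print(g.mean(v))             # (array([0, 1]), array([ 2., 20.]))
    print(g.mean(v, weights=w))  # weighted: group 1 gives (3*10 + 30)/4 = 15.0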
|
def mode(self, values, weights=None):
"""compute the mode within each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the mode of per group
weights : array_like, [keys], float, optional
optional weight associated with each entry in values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
if weights is None:
unique, weights = npi.count((self.index.sorted_group_rank_per_key, values))
else:
unique, weights = npi.group_by((self.index.sorted_group_rank_per_key, values)).sum(weights)
x, bin = npi.group_by(unique[0]).argmax(weights)
return x, unique[1][bin]
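# Editor's note (illustrative): with keys [0, 0, 0, 1] and values [5, 5, 6, 7],
# npi.count over (group_rank, value) pairs yields the weights 2, 1, 1 for the
# pairs (0, 5), (0, 6), (1, 7); the per-group argmax over those weights then
# selects 5 for group 0 and 7 for group 1.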
| 384
| 407
|
"""grouping module"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
import itertools
import numpy as np
from numpy_indexed.index import as_index
import numpy_indexed as npi
__author__ = "Eelco Hoogendoorn"
__license__ = "LGPL"
__email__ = "hoogendoorn.eelco@gmail.com"
class GroupBy(object):
"""
GroupBy class
contains an index of keys, and extends the index functionality with grouping-specific functionality
"""
def __init__(self, keys, axis=0):
"""
Parameters
----------
keys : indexable object
sequence of keys to group by
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
self.index = as_index(keys, axis)
#forward interesting/'public' index properties
@property
def unique(self):
"""unique keys"""
return self.index.unique
@property
def count(self):
"""count of each unique key"""
return self.index.count
@property
def inverse(self):
"""mapping such that unique[inverse]==keys"""
return self.index.inverse
@property
def groups(self):
"""int, number of groups formed by the keys"""
return self.index.groups
#some different methods of chopping up a set of values by key
def split_iterable_as_iterable(self, values):
"""Group iterable into iterables, in the order of the keys
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
Memory consumption depends on the amount of sorting required
Worst case, if index.sorter[-1] = 0, we need to consume the entire value iterable,
before we can start yielding any output
But to the extent that the keys are already sorted, the grouping is lazy
"""
values = iter(enumerate(values))
cache = dict()
def get_value(ti):
try:
return cache.pop(ti)
            except KeyError:
while True:
i, v = next(values)
if i==ti:
return v
cache[i] = v
s = iter(self.index.sorter)
for c in self.count:
yield (get_value(i) for i in itertools.islice(s, int(c)))
def split_iterable_as_unordered_iterable(self, values):
"""Group iterable into iterables, without regard for the ordering of self.index.unique
key-group tuples are yielded as soon as they are complete
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
tuple of key, and a list of corresponding items in values
Notes
-----
        This approach is lazy to the extent that members of the same group occur close together in the input iterable
"""
from collections import defaultdict
cache = defaultdict(list)
count = self.count
unique = self.unique
key = (lambda i: unique[i]) if isinstance(unique, np.ndarray) else (lambda i: tuple(c[i] for c in unique))
for i,v in zip(self.inverse, values):
cache[i].append(v)
if len(cache[i]) == count[i]:
yield key(i), cache.pop(i)
def split_sequence_as_iterable(self, values):
"""Group sequence into iterables
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
        This is the preferred method if values has random access, but we don't want it completely in memory.
        Like a big memory-mapped file, for instance
"""
        # note: relies on values supporting random-access indexing
s = iter(self.index.sorter)
for c in self.count:
yield (values[i] for i in itertools.islice(s, int(c)))
def split_array_as_array(self, values):
"""Group ndarray into ndarray by means of reshaping
Parameters
----------
values : ndarray_like, [index.size, ...]
Returns
-------
ndarray, [groups, group_size, ...]
values grouped by key
Raises
------
        ValueError
            This operation is only possible if index.uniform==True
"""
if not self.index.uniform:
raise ValueError("Array can only be split as array if all groups have the same size")
values = np.asarray(values)
values = values[self.index.sorter]
return values.reshape(self.groups, -1, *values.shape[1:])
def split_array_as_list(self, values):
"""Group values as a list of arrays, or a jagged-array
Parameters
----------
values : ndarray, [keys, ...]
Returns
-------
list of length self.groups of ndarray, [key_count, ...]
"""
values = np.asarray(values)
values = values[self.index.sorter]
return np.split(values, self.index.slices[1:-1], axis=0)
def split(self, values):
"""some sensible defaults"""
try:
return self.split_array_as_array(values)
        except ValueError:
# FIXME: change to iter in python 3?
return self.split_array_as_list(values)
def __call__(self, values):
"""not sure how i feel about this. explicit is better than implict?"""
return self.unique, self.split(values)
# ufunc based reduction methods. should they return unique keys by default?
def reduce(self, values, operator=np.add, axis=0, dtype=None):
"""Reduce the values over identical key groups, using the given ufunc
reduction is over the first axis, which should have elements corresponding to the keys
        all other axes are treated independently for the sake of this reduction
Parameters
----------
values : ndarray, [keys, ...]
values to perform reduction over
operator : numpy.ufunc
            a numpy ufunc, such as np.add or np.multiply
axis : int, optional
the axis to reduce over
dtype : output dtype
Returns
-------
ndarray, [groups, ...]
values reduced by operator over the key-groups
"""
values = np.take(values, self.index.sorter, axis=axis)
return operator.reduceat(values, self.index.start, axis=axis, dtype=dtype)
def sum(self, values, axis=0, dtype=None):
"""compute the sum over each group
Parameters
----------
values : array_like, [keys, ...]
values to sum per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype)
def prod(self, values, axis=0, dtype=None):
"""compute the product over each group
Parameters
----------
values : array_like, [keys, ...]
values to multiply per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype, operator=np.multiply)
def mean(self, values, axis=0, weights=None, dtype=None):
"""compute the mean over each group
Parameters
----------
values : array_like, [keys, ...]
values to take average of per group
axis : int, optional
alternative reduction axis for values
weights : ndarray, [keys, ...], optional
weight to use for each value
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
if weights is None:
result = self.reduce(values, axis=axis, dtype=dtype)
shape = [1] * values.ndim
shape[axis] = self.groups
weights = self.count.reshape(shape)
else:
weights = np.asarray(weights)
result = self.reduce(values * weights, axis=axis, dtype=dtype)
weights = self.reduce(weights, axis=axis, dtype=dtype)
return self.unique, result / weights
def var(self, values, axis=0, weights=None, dtype=None):
"""compute the variance over each group
Parameters
----------
values : array_like, [keys, ...]
values to take variance of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
unique, mean = self.mean(values, axis, weights, dtype)
err = values - mean.take(self.inverse, axis)
if weights is None:
shape = [1] * values.ndim
shape[axis] = self.groups
group_weights = self.count.reshape(shape)
var = self.reduce(err ** 2, axis=axis, dtype=dtype)
else:
weights = np.asarray(weights)
group_weights = self.reduce(weights, axis=axis, dtype=dtype)
var = self.reduce(weights * err ** 2, axis=axis, dtype=dtype)
return unique, var / group_weights
def std(self, values, axis=0, weights=None, dtype=None):
"""standard deviation over each group
Parameters
----------
values : array_like, [keys, ...]
values to take standard deviation of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
unique, var = self.var(values, axis, weights, dtype)
return unique, np.sqrt(var)
def median(self, values, axis=0, average=True):
"""compute the median value over each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the median of per group
axis : int, optional
alternative reduction axis for values
average : bool, optional
when average is true, the average of the two central values is taken for groups with an even key-count
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
mid_2 = self.index.start + self.index.stop
        hi = mid_2 // 2
        lo = (mid_2 - 1) // 2
#need this indirection for lex-index compatibility
sorted_group_rank_per_key = self.index.sorted_group_rank_per_key
def median1d(slc):
#place values at correct keys; preconditions the upcoming lexsort
slc = slc[self.index.sorter]
#refine value sorting within each keygroup
sorter = np.lexsort((slc, sorted_group_rank_per_key))
slc = slc[sorter]
return (slc[lo]+slc[hi]) / 2 if average else slc[hi]
values = np.asarray(values)
if values.ndim>1: #is trying to skip apply_along_axis somewhat premature optimization?
values = np.apply_along_axis(median1d, axis, values)
else:
values = median1d(values)
return self.unique, values
def mode(self, values, weights=None):
"""compute the mode within each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the mode of per group
weights : array_like, [keys], float, optional
optional weight associated with each entry in values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
if weights is None:
unique, weights = npi.count((self.index.sorted_group_rank_per_key, values))
else:
unique, weights = npi.group_by((self.index.sorted_group_rank_per_key, values)).sum(weights)
x, bin = npi.group_by(unique[0]).argmax(weights)
return x, unique[1][bin]
def min(self, values, axis=0):
"""return the minimum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take minimum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.minimum, axis)
def max(self, values, axis=0):
"""return the maximum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take maximum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.maximum, axis)
def first(self, values, axis=0):
"""return values at first occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the first value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.start], axis)
def last(self, values, axis=0):
"""return values at last occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the last value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.stop-1], axis)
def any(self, values, axis=0):
"""compute if any item evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
        if values.dtype != bool:
values = values != 0
return self.unique, self.reduce(values, axis=axis) > 0
def all(self, values, axis=0):
"""compute if all items evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, operator=np.multiply) != 0
def argmin(self, values):
"""return the index into values corresponding to the minimum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmin of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmin : ndarray, [groups]
index into value array, representing the argmin per group
"""
keys, minima = self.min(values)
minima = minima[self.inverse]
        # select the first occurrence of the minimum in each group
index = as_index((self.inverse, values == minima))
return keys, index.sorter[index.start[-self.groups:]]
def argmax(self, values):
"""return the index into values corresponding to the maximum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmax of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmax : ndarray, [groups]
index into value array, representing the argmax per group
"""
keys, maxima = self.max(values)
maxima = maxima[self.inverse]
        # select the first occurrence of the maximum in each group
index = as_index((self.inverse, values == maxima))
return keys, index.sorter[index.start[-self.groups:]]
#implement iter interface? could simply do zip( group_by(keys)(values)), no?
def group_by(keys, values=None, reduction=None, axis=0):
"""construct a grouping object on the given keys, optionally performing the given reduction on the given values
Parameters
----------
keys : indexable object
keys to group by
values : array_like, optional
sequence of values, of the same length as keys
if a reduction function is provided, the given values are reduced by key
if no reduction is provided, the given values are grouped and split by key
reduction : lambda, optional
reduction function to apply to the values in each group
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
Returns
-------
iterable
if values is None, a GroupBy object of the given keys object
        if reduction is None, a tuple of a sequence of unique keys and a sequence of grouped values
else, a sequence of tuples of unique keys and reductions of values over that key-group
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
g = GroupBy(keys, axis)
if values is None:
return g
groups = g.split(values)
if reduction is None:
return g.unique, groups
return [(key, reduction(group)) for key, group in zip(g.unique, groups)]
__all__ = ['group_by']
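# Editor's usage sketch (illustrative only, not part of the module; assumes it
# is importable as numpy_indexed and uses hypothetical toy arrays):
#
#   import numpy as np
#   from numpy_indexed import group_by
#
#   keys   = np.array([0, 1, 0, 1, 1])
#   values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
#
#   group_by(keys)                             # -> GroupBy object
#   unique, groups = group_by(keys, values)    # unique == [0, 1]; values split per key
#   group_by(keys, values, reduction=np.sum)   # -> [(0, 4.0), (1, 11.0)]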
|
any
|
compute if any item evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
|
"""grouping module"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
import itertools
import numpy as np
from numpy_indexed.index import as_index
import numpy_indexed as npi
__author__ = "Eelco Hoogendoorn"
__license__ = "LGPL"
__email__ = "hoogendoorn.eelco@gmail.com"
class GroupBy(object):
"""
GroupBy class
contains an index of keys, and extends the index functionality with grouping-specific functionality
"""
def __init__(self, keys, axis=0):
"""
Parameters
----------
keys : indexable object
sequence of keys to group by
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
self.index = as_index(keys, axis)
#forward interesting/'public' index properties
@property
def unique(self):
"""unique keys"""
return self.index.unique
@property
def count(self):
"""count of each unique key"""
return self.index.count
@property
def inverse(self):
"""mapping such that unique[inverse]==keys"""
return self.index.inverse
@property
def groups(self):
"""int, number of groups formed by the keys"""
return self.index.groups
#some different methods of chopping up a set of values by key
def split_iterable_as_iterable(self, values):
"""Group iterable into iterables, in the order of the keys
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
Memory consumption depends on the amount of sorting required
Worst case, if index.sorter[-1] = 0, we need to consume the entire value iterable,
before we can start yielding any output
But to the extent that the keys are already sorted, the grouping is lazy
"""
values = iter(enumerate(values))
cache = dict()
def get_value(ti):
try:
return cache.pop(ti)
            except KeyError:
while True:
i, v = next(values)
if i==ti:
return v
cache[i] = v
s = iter(self.index.sorter)
for c in self.count:
yield (get_value(i) for i in itertools.islice(s, int(c)))
def split_iterable_as_unordered_iterable(self, values):
"""Group iterable into iterables, without regard for the ordering of self.index.unique
key-group tuples are yielded as soon as they are complete
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
tuple of key, and a list of corresponding items in values
Notes
-----
        This approach is lazy, insofar as each group's values appear close together in the input iterable
"""
from collections import defaultdict
cache = defaultdict(list)
count = self.count
unique = self.unique
key = (lambda i: unique[i]) if isinstance(unique, np.ndarray) else (lambda i: tuple(c[i] for c in unique))
for i,v in zip(self.inverse, values):
cache[i].append(v)
if len(cache[i]) == count[i]:
yield key(i), cache.pop(i)
def split_sequence_as_iterable(self, values):
"""Group sequence into iterables
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
        This is the preferred method if values has random access, but we don't want it completely in memory.
Like a big memory mapped file, for instance
"""
s = iter(self.index.sorter)
for c in self.count:
yield (values[i] for i in itertools.islice(s, int(c)))
def split_array_as_array(self, values):
"""Group ndarray into ndarray by means of reshaping
Parameters
----------
values : ndarray_like, [index.size, ...]
Returns
-------
ndarray, [groups, group_size, ...]
values grouped by key
Raises
------
        ValueError
            This operation is only possible if index.uniform==True
"""
if not self.index.uniform:
raise ValueError("Array can only be split as array if all groups have the same size")
values = np.asarray(values)
values = values[self.index.sorter]
return values.reshape(self.groups, -1, *values.shape[1:])
def split_array_as_list(self, values):
"""Group values as a list of arrays, or a jagged-array
Parameters
----------
values : ndarray, [keys, ...]
Returns
-------
list of length self.groups of ndarray, [key_count, ...]
"""
values = np.asarray(values)
values = values[self.index.sorter]
return np.split(values, self.index.slices[1:-1], axis=0)
def split(self, values):
"""some sensible defaults"""
try:
return self.split_array_as_array(values)
        except ValueError:
            # FIXME: change to iter in python 3?
return self.split_array_as_list(values)
def __call__(self, values):
"""not sure how i feel about this. explicit is better than implict?"""
return self.unique, self.split(values)
# ufunc based reduction methods. should they return unique keys by default?
def reduce(self, values, operator=np.add, axis=0, dtype=None):
"""Reduce the values over identical key groups, using the given ufunc
reduction is over the first axis, which should have elements corresponding to the keys
        all other axes are treated independently for the sake of this reduction
Parameters
----------
values : ndarray, [keys, ...]
values to perform reduction over
operator : numpy.ufunc
a numpy ufunc, such as np.add or np.sum
axis : int, optional
the axis to reduce over
dtype : output dtype
Returns
-------
ndarray, [groups, ...]
values reduced by operator over the key-groups
"""
values = np.take(values, self.index.sorter, axis=axis)
return operator.reduceat(values, self.index.start, axis=axis, dtype=dtype)
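    # Worked sketch of the reduceat mechanics above (hypothetical values):
    #   keys        = [1, 0, 1, 0]    ->  index.sorter = [1, 3, 0, 2]
    #   values      = [10, 1, 20, 2]  ->  sorted values = [1, 2, 10, 20]
    #   index.start = [0, 2]          ->  np.add.reduceat(sorted, [0, 2]) = [3, 30]
    # i.e. per-group sums for the unique keys [0, 1].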
def sum(self, values, axis=0, dtype=None):
"""compute the sum over each group
Parameters
----------
values : array_like, [keys, ...]
values to sum per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype)
def prod(self, values, axis=0, dtype=None):
"""compute the product over each group
Parameters
----------
values : array_like, [keys, ...]
values to multiply per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype, operator=np.multiply)
def mean(self, values, axis=0, weights=None, dtype=None):
"""compute the mean over each group
Parameters
----------
values : array_like, [keys, ...]
values to take average of per group
axis : int, optional
alternative reduction axis for values
weights : ndarray, [keys, ...], optional
weight to use for each value
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
if weights is None:
result = self.reduce(values, axis=axis, dtype=dtype)
shape = [1] * values.ndim
shape[axis] = self.groups
weights = self.count.reshape(shape)
else:
weights = np.asarray(weights)
result = self.reduce(values * weights, axis=axis, dtype=dtype)
weights = self.reduce(weights, axis=axis, dtype=dtype)
return self.unique, result / weights
def var(self, values, axis=0, weights=None, dtype=None):
"""compute the variance over each group
Parameters
----------
values : array_like, [keys, ...]
values to take variance of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
unique, mean = self.mean(values, axis, weights, dtype)
err = values - mean.take(self.inverse, axis)
if weights is None:
shape = [1] * values.ndim
shape[axis] = self.groups
group_weights = self.count.reshape(shape)
var = self.reduce(err ** 2, axis=axis, dtype=dtype)
else:
weights = np.asarray(weights)
group_weights = self.reduce(weights, axis=axis, dtype=dtype)
var = self.reduce(weights * err ** 2, axis=axis, dtype=dtype)
return unique, var / group_weights
def std(self, values, axis=0, weights=None, dtype=None):
"""standard deviation over each group
Parameters
----------
values : array_like, [keys, ...]
values to take standard deviation of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
unique, var = self.var(values, axis, weights, dtype)
return unique, np.sqrt(var)
def median(self, values, axis=0, average=True):
"""compute the median value over each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the median of per group
axis : int, optional
alternative reduction axis for values
average : bool, optional
when average is true, the average of the two central values is taken for groups with an even key-count
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
mid_2 = self.index.start + self.index.stop
hi = (mid_2 ) // 2
lo = (mid_2 - 1) // 2
#need this indirection for lex-index compatibility
sorted_group_rank_per_key = self.index.sorted_group_rank_per_key
def median1d(slc):
#place values at correct keys; preconditions the upcoming lexsort
slc = slc[self.index.sorter]
#refine value sorting within each keygroup
sorter = np.lexsort((slc, sorted_group_rank_per_key))
slc = slc[sorter]
return (slc[lo]+slc[hi]) / 2 if average else slc[hi]
values = np.asarray(values)
if values.ndim>1: #is trying to skip apply_along_axis somewhat premature optimization?
values = np.apply_along_axis(median1d, axis, values)
else:
values = median1d(values)
return self.unique, values
def mode(self, values, weights=None):
"""compute the mode within each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the mode of per group
weights : array_like, [keys], float, optional
optional weight associated with each entry in values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
if weights is None:
unique, weights = npi.count((self.index.sorted_group_rank_per_key, values))
else:
unique, weights = npi.group_by((self.index.sorted_group_rank_per_key, values)).sum(weights)
x, bin = npi.group_by(unique[0]).argmax(weights)
return x, unique[1][bin]
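    # Sketch of the mode computation above (hypothetical values):
    #   keys = [0, 0, 0, 1], values = ['a', 'a', 'b', 'b']
    #   -> counts per (group, value) pair: (0, 'a'): 2, (0, 'b'): 1, (1, 'b'): 1
    #   -> argmax of the counts within each group yields modes ['a', 'b'].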
def min(self, values, axis=0):
"""return the minimum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take minimum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.minimum, axis)
def max(self, values, axis=0):
"""return the maximum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take maximum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.maximum, axis)
def first(self, values, axis=0):
"""return values at first occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the first value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.start], axis)
def last(self, values, axis=0):
"""return values at last occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the last value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.stop-1], axis)
# MASKED: any function (lines 489-509)
def all(self, values, axis=0):
"""compute if all items evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, operator=np.multiply) != 0
def argmin(self, values):
"""return the index into values corresponding to the minimum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmin of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmin : ndarray, [groups]
index into value array, representing the argmin per group
"""
keys, minima = self.min(values)
minima = minima[self.inverse]
        # select the first occurrence of the minimum in each group
index = as_index((self.inverse, values == minima))
return keys, index.sorter[index.start[-self.groups:]]
def argmax(self, values):
"""return the index into values corresponding to the maximum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmax of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmax : ndarray, [groups]
index into value array, representing the argmax per group
"""
keys, maxima = self.max(values)
maxima = maxima[self.inverse]
        # select the first occurrence of the maximum in each group
index = as_index((self.inverse, values == maxima))
return keys, index.sorter[index.start[-self.groups:]]
#implement iter interface? could simply do zip( group_by(keys)(values)), no?
def group_by(keys, values=None, reduction=None, axis=0):
"""construct a grouping object on the given keys, optionally performing the given reduction on the given values
Parameters
----------
keys : indexable object
keys to group by
values : array_like, optional
sequence of values, of the same length as keys
if a reduction function is provided, the given values are reduced by key
if no reduction is provided, the given values are grouped and split by key
    reduction : callable, optional
reduction function to apply to the values in each group
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
Returns
-------
iterable
if values is None, a GroupBy object of the given keys object
        if reduction is None, a tuple of a sequence of unique keys and a sequence of grouped values
else, a sequence of tuples of unique keys and reductions of values over that key-group
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
g = GroupBy(keys, axis)
if values is None:
return g
groups = g.split(values)
if reduction is None:
return g.unique, groups
return [(key, reduction(group)) for key, group in zip(g.unique, groups)]
__all__ = ['group_by']
|
def any(self, values, axis=0):
"""compute if any item evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
        if values.dtype != np.bool_:
values = values != 0
return self.unique, self.reduce(values, axis=axis) > 0
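# Editor's usage sketch for the implementation above (illustrative; assumes the
# grouping module is importable as numpy_indexed):
#
#   import numpy as np
#   from numpy_indexed import group_by
#
#   keys  = np.array([0, 0, 1, 1])
#   flags = np.array([0, 1, 0, 0])
#   unique, any_true = group_by(keys).any(flags)
#   # unique == [0, 1], any_true == [True, False]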
| 489
| 509
|
"""grouping module"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
import itertools
import numpy as np
from numpy_indexed.index import as_index
import numpy_indexed as npi
__author__ = "Eelco Hoogendoorn"
__license__ = "LGPL"
__email__ = "hoogendoorn.eelco@gmail.com"
class GroupBy(object):
"""
GroupBy class
contains an index of keys, and extends the index functionality with grouping-specific functionality
"""
def __init__(self, keys, axis=0):
"""
Parameters
----------
keys : indexable object
sequence of keys to group by
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
self.index = as_index(keys, axis)
#forward interesting/'public' index properties
@property
def unique(self):
"""unique keys"""
return self.index.unique
@property
def count(self):
"""count of each unique key"""
return self.index.count
@property
def inverse(self):
"""mapping such that unique[inverse]==keys"""
return self.index.inverse
@property
def groups(self):
"""int, number of groups formed by the keys"""
return self.index.groups
#some different methods of chopping up a set of values by key
def split_iterable_as_iterable(self, values):
"""Group iterable into iterables, in the order of the keys
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
Memory consumption depends on the amount of sorting required
Worst case, if index.sorter[-1] = 0, we need to consume the entire value iterable,
before we can start yielding any output
But to the extent that the keys are already sorted, the grouping is lazy
"""
values = iter(enumerate(values))
cache = dict()
def get_value(ti):
try:
return cache.pop(ti)
            except KeyError:
while True:
i, v = next(values)
if i==ti:
return v
cache[i] = v
s = iter(self.index.sorter)
for c in self.count:
yield (get_value(i) for i in itertools.islice(s, int(c)))
def split_iterable_as_unordered_iterable(self, values):
"""Group iterable into iterables, without regard for the ordering of self.index.unique
key-group tuples are yielded as soon as they are complete
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
tuple of key, and a list of corresponding items in values
Notes
-----
        This approach is lazy, insofar as each group's values appear close together in the input iterable
"""
from collections import defaultdict
cache = defaultdict(list)
count = self.count
unique = self.unique
key = (lambda i: unique[i]) if isinstance(unique, np.ndarray) else (lambda i: tuple(c[i] for c in unique))
for i,v in zip(self.inverse, values):
cache[i].append(v)
if len(cache[i]) == count[i]:
yield key(i), cache.pop(i)
def split_sequence_as_iterable(self, values):
"""Group sequence into iterables
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
        This is the preferred method if values has random access, but we don't want it completely in memory.
Like a big memory mapped file, for instance
"""
s = iter(self.index.sorter)
for c in self.count:
yield (values[i] for i in itertools.islice(s, int(c)))
def split_array_as_array(self, values):
"""Group ndarray into ndarray by means of reshaping
Parameters
----------
values : ndarray_like, [index.size, ...]
Returns
-------
ndarray, [groups, group_size, ...]
values grouped by key
Raises
------
        ValueError
            This operation is only possible if index.uniform==True
"""
if not self.index.uniform:
raise ValueError("Array can only be split as array if all groups have the same size")
values = np.asarray(values)
values = values[self.index.sorter]
return values.reshape(self.groups, -1, *values.shape[1:])
def split_array_as_list(self, values):
"""Group values as a list of arrays, or a jagged-array
Parameters
----------
values : ndarray, [keys, ...]
Returns
-------
list of length self.groups of ndarray, [key_count, ...]
"""
values = np.asarray(values)
values = values[self.index.sorter]
return np.split(values, self.index.slices[1:-1], axis=0)
def split(self, values):
"""some sensible defaults"""
try:
return self.split_array_as_array(values)
        except ValueError:
            # FIXME: change to iter in python 3?
return self.split_array_as_list(values)
def __call__(self, values):
"""not sure how i feel about this. explicit is better than implict?"""
return self.unique, self.split(values)
# ufunc based reduction methods. should they return unique keys by default?
def reduce(self, values, operator=np.add, axis=0, dtype=None):
"""Reduce the values over identical key groups, using the given ufunc
reduction is over the first axis, which should have elements corresponding to the keys
        all other axes are treated independently for the sake of this reduction
Parameters
----------
values : ndarray, [keys, ...]
values to perform reduction over
operator : numpy.ufunc
a numpy ufunc, such as np.add or np.sum
axis : int, optional
the axis to reduce over
dtype : output dtype
Returns
-------
ndarray, [groups, ...]
values reduced by operator over the key-groups
"""
values = np.take(values, self.index.sorter, axis=axis)
return operator.reduceat(values, self.index.start, axis=axis, dtype=dtype)
def sum(self, values, axis=0, dtype=None):
"""compute the sum over each group
Parameters
----------
values : array_like, [keys, ...]
values to sum per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype)
def prod(self, values, axis=0, dtype=None):
"""compute the product over each group
Parameters
----------
values : array_like, [keys, ...]
values to multiply per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype, operator=np.multiply)
def mean(self, values, axis=0, weights=None, dtype=None):
"""compute the mean over each group
Parameters
----------
values : array_like, [keys, ...]
values to take average of per group
axis : int, optional
alternative reduction axis for values
weights : ndarray, [keys, ...], optional
weight to use for each value
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
if weights is None:
result = self.reduce(values, axis=axis, dtype=dtype)
shape = [1] * values.ndim
shape[axis] = self.groups
weights = self.count.reshape(shape)
else:
weights = np.asarray(weights)
result = self.reduce(values * weights, axis=axis, dtype=dtype)
weights = self.reduce(weights, axis=axis, dtype=dtype)
return self.unique, result / weights
def var(self, values, axis=0, weights=None, dtype=None):
"""compute the variance over each group
Parameters
----------
values : array_like, [keys, ...]
values to take variance of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
unique, mean = self.mean(values, axis, weights, dtype)
err = values - mean.take(self.inverse, axis)
if weights is None:
shape = [1] * values.ndim
shape[axis] = self.groups
group_weights = self.count.reshape(shape)
var = self.reduce(err ** 2, axis=axis, dtype=dtype)
else:
weights = np.asarray(weights)
group_weights = self.reduce(weights, axis=axis, dtype=dtype)
var = self.reduce(weights * err ** 2, axis=axis, dtype=dtype)
return unique, var / group_weights
def std(self, values, axis=0, weights=None, dtype=None):
"""standard deviation over each group
Parameters
----------
values : array_like, [keys, ...]
values to take standard deviation of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
unique, var = self.var(values, axis, weights, dtype)
return unique, np.sqrt(var)
def median(self, values, axis=0, average=True):
"""compute the median value over each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the median of per group
axis : int, optional
alternative reduction axis for values
average : bool, optional
when average is true, the average of the two central values is taken for groups with an even key-count
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
mid_2 = self.index.start + self.index.stop
hi = (mid_2 ) // 2
lo = (mid_2 - 1) // 2
#need this indirection for lex-index compatibility
sorted_group_rank_per_key = self.index.sorted_group_rank_per_key
def median1d(slc):
#place values at correct keys; preconditions the upcoming lexsort
slc = slc[self.index.sorter]
#refine value sorting within each keygroup
sorter = np.lexsort((slc, sorted_group_rank_per_key))
slc = slc[sorter]
return (slc[lo]+slc[hi]) / 2 if average else slc[hi]
values = np.asarray(values)
if values.ndim>1: #is trying to skip apply_along_axis somewhat premature optimization?
values = np.apply_along_axis(median1d, axis, values)
else:
values = median1d(values)
return self.unique, values
def mode(self, values, weights=None):
"""compute the mode within each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the mode of per group
weights : array_like, [keys], float, optional
optional weight associated with each entry in values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
if weights is None:
unique, weights = npi.count((self.index.sorted_group_rank_per_key, values))
else:
unique, weights = npi.group_by((self.index.sorted_group_rank_per_key, values)).sum(weights)
x, bin = npi.group_by(unique[0]).argmax(weights)
return x, unique[1][bin]
def min(self, values, axis=0):
"""return the minimum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take minimum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.minimum, axis)
def max(self, values, axis=0):
"""return the maximum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take maximum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.maximum, axis)
def first(self, values, axis=0):
"""return values at first occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the first value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.start], axis)
def last(self, values, axis=0):
"""return values at last occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the last value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.stop-1], axis)
def any(self, values, axis=0):
"""compute if any item evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
        if values.dtype != np.bool_:
values = values != 0
return self.unique, self.reduce(values, axis=axis) > 0
def all(self, values, axis=0):
"""compute if all items evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, operator=np.multiply) != 0
def argmin(self, values):
"""return the index into values corresponding to the minimum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmin of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmin : ndarray, [groups]
index into value array, representing the argmin per group
"""
keys, minima = self.min(values)
minima = minima[self.inverse]
        # select the first occurrence of the minimum in each group
index = as_index((self.inverse, values == minima))
return keys, index.sorter[index.start[-self.groups:]]
def argmax(self, values):
"""return the index into values corresponding to the maximum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmax of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmax : ndarray, [groups]
index into value array, representing the argmax per group
"""
keys, maxima = self.max(values)
maxima = maxima[self.inverse]
        # select the first occurrence of the maximum in each group
index = as_index((self.inverse, values == maxima))
return keys, index.sorter[index.start[-self.groups:]]
#implement iter interface? could simply do zip( group_by(keys)(values)), no?
def group_by(keys, values=None, reduction=None, axis=0):
"""construct a grouping object on the given keys, optionally performing the given reduction on the given values
Parameters
----------
keys : indexable object
keys to group by
values : array_like, optional
sequence of values, of the same length as keys
if a reduction function is provided, the given values are reduced by key
if no reduction is provided, the given values are grouped and split by key
    reduction : callable, optional
reduction function to apply to the values in each group
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
Returns
-------
iterable
if values is None, a GroupBy object of the given keys object
        if reduction is None, a tuple of a sequence of unique keys and a sequence of grouped values
else, a sequence of tuples of unique keys and reductions of values over that key-group
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
g = GroupBy(keys, axis)
if values is None:
return g
groups = g.split(values)
if reduction is None:
return g.unique, groups
return [(key, reduction(group)) for key, group in zip(g.unique, groups)]
__all__ = ['group_by']
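# Editor's note: a short sketch of the reduction methods defined above
# (illustrative only; assumes the module is importable as numpy_indexed):
#
#   import numpy as np
#   from numpy_indexed import group_by
#
#   keys   = np.array(['a', 'a', 'b'])
#   values = np.array([1.0, 3.0, 5.0])
#   g = group_by(keys)
#   g.mean(values)   # -> (['a', 'b'], [2.0, 5.0])
#   g.min(values)    # -> (['a', 'b'], [1.0, 5.0])
#   g.count          # -> [2, 1]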
|
is_duplicate
|
:return boolean: Returns True if the gadgets are a semantic match. Used for non-locality gadget metrics.
Semantic match is defined as the exact same sequence of equivalent instructions.
|
"""
Gadget class
"""
# Standard Library Imports
# Third Party Imports
# Local Imports
from static_analyzer.Instruction import Instruction
class Gadget(object):
"""
The Gadget class represents a single gadget.
"""
def __init__(self, raw_gadget):
"""
Gadget constructor
:param str raw_gadget: raw line output from ROPgadget
"""
# Parse the raw line
self.offset = raw_gadget[:raw_gadget.find(":")]
self.instruction_string = raw_gadget[raw_gadget.find(":") + 2:]
# Parse instruction objects
self.instructions = []
for instr in self.instruction_string.split(" ; "):
self.instructions.append(Instruction(instr))
# Initialize score
self.score = 0.0
def is_useless_op(self):
"""
:return boolean: Returns True if the first instruction opcode is in the "useless" list, False otherwise
Default behavior is to consider opcodes useful unless otherwise observed.
"""
first_opcode = self.instructions[0].opcode
# Bulk catch for all "jump" opcodes: No reason to include the instruction, just use the suffix directly
if first_opcode.startswith("j"):
return True
# Bulk catch for bounds checked jumps, same reason as above
if first_opcode.startswith("bnd"):
return True
# Bulk catch for all "ret" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("ret"):
return True
# Bulk catch for all "iret" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("iret"):
return True
# Bulk catch for all "call" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("call"):
return True
# Useless opcodes:
# NOP - No reason to include the instruction, just use the suffix directly
# LJMP - Same reason as "jump" opcodes above
useless = ["nop", "fnop", "ljmp"]
return first_opcode in useless
def contains_unusable_op(self):
"""
:return boolean: Returns True if any instruction opcode is unusable. False otherwise
unusable instructions are Ring-0 opcodes that trap in user mode and some other exceptional ops.
"""
for instr in self.instructions:
# Bulk catch for all "invalidate" opcodes: Ring-0 instructions
if instr.opcode.startswith("inv"):
return True
# Bulk catch for all "Virtual-Machine" opcodes: Ring-0 instructions
if instr.opcode.startswith("vm") and instr.opcode != "vminsd" and instr.opcode != "vminpd":
return True
# Bulk catch for all "undefined" opcodes
if instr.opcode.startswith("ud"):
return True
# Other Ring-0 opcodes and RSM, LOCK prefix
unusable = ["clts", "hlt", "lgdt", "lidt", "lldt", "lmsw", "ltr", "monitor", "mwait",
"swapgs", "sysexit", "sysreturn", "wbinvd", "wrmsr", "xsetbv", "rsm", "lock"]
if instr.opcode in unusable:
return True
# Check for ring-0 operands (control, debug, and test registers)
if instr.op1 is not None:
if instr.op1.startswith("cr") or instr.op1.startswith("tr") or instr.op1.startswith("db"):
return True
if instr.op2 is not None:
if instr.op2.startswith("cr") or instr.op2.startswith("tr") or instr.op2.startswith("db"):
return True
return False
def is_gpi_only(self):
"""
:return boolean: Returns True if the gadget is a single instruction and starts with 'ret', 'jmp', or 'call',
False otherwise
"""
if len(self.instructions) == 1:
opcode = self.instructions[0].opcode
if opcode.startswith("ret") or opcode.startswith("jmp") or opcode.startswith("call"):
return True
return False
def is_invalid_branch(self):
"""
:return boolean: Returns True if the gadget is 'jmp' or 'call' ending and the call target is a constant offset
or does not target a recognized register family. False otherwise
"""
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("call") or last_instr.opcode.startswith("jmp"):
if Instruction.get_operand_register_family(last_instr.op1) is None:
return True
return False
def has_invalid_ret_offset(self):
"""
:return boolean: Returns True if the gadget is 'ret' ending and contains a constant offset that is not byte
aligned or is greater than 32 bytes, False otherwise
"""
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("ret") and last_instr.op1 is not None:
offset = Instruction.get_operand_as_constant(last_instr.op1)
if (offset % 2 != 0) or (offset > 32):
return True
return False
def clobbers_created_value(self):
"""
:return boolean: Returns True if the gadget completely overwrites the value created in the first instruction,
False otherwise.
"""
first_instr = self.instructions[0]
# Check if the first instruction creates a value or is an xchg operand (excluded as an edge case)
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
return False
# Check op1 to find the register family to protect
first_family = Instruction.get_operand_register_family(first_instr.op1)
# Most likely means first operand is a constant, exclude from analysis
if first_family is None:
return False
# Iterate through intermediate instructions, determine if it overwrites protected value (or part of it)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value() or "xchg" in cur_instr.opcode:
continue
# Check for non-static modification of the register family
if first_family == Instruction.get_operand_register_family(cur_instr.op1):
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
def creates_unusable_value(self):
"""
:return boolean: Returns True if the gadget creates a value in segment or extension registers, or are
RIP-relative, or are constant memory locations; False otherwise.
"""
        # Check if the first instruction creates a value (or may potentially set a flag)
first_instr = self.instructions[0]
if first_instr.opcode in ["cmp", "test", "push"] or first_instr.op1 is None:
return False
# Check if first operand is not a constant and it does not belong to a recognized register family
if not Instruction.is_constant(first_instr.op1) and \
Instruction.get_operand_register_family(first_instr.op1) is None:
return True
return False
def contains_intermediate_GPI(self):
"""
:return boolean: Returns True if the gadget's intermediate instructions contain a GPI (or a generic interrupt),
False otherwise.
"""
for i in range(len(self.instructions)-1):
cur_opcode = self.instructions[i].opcode
cur_target = self.instructions[i].op1
if cur_opcode.startswith("ret") or \
cur_opcode == "syscall" or cur_opcode == "sysenter" or cur_opcode.startswith("int") or \
("jmp" in cur_opcode and not Instruction.is_constant(cur_target)) or \
("call" in cur_opcode and not Instruction.is_constant(cur_target)):
return True
return False
def clobbers_stack_pointer(self):
"""
:return boolean: Returns True if the ROP gadget's instructions assign a non-static value to the stack pointer
register, False otherwise.
"""
# Only check ROP gadgets
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("ret"):
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Check for non-static modification of the stack pointer register family
if Instruction.get_operand_register_family(cur_instr.op1) == 7: # RSP, ESP family number
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "pop"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
def clobbers_indirect_target(self):
"""
:return boolean: Returns True if the JOP/COP gadget's instructions modify the indirect branch register in
certain ways, False otherwise.
"""
# Get the register family of the indirect jump / call
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
family = Instruction.get_operand_register_family(last_instr.op1)
# Check each instruction to see if it clobbers the value
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# First check if the instruction modifies the target
if cur_instr.op1 in Instruction.register_families[family]:
# Does the instruction zeroize out the target?
if cur_instr.opcode == "xor" and cur_instr.op1 == cur_instr.op2:
return True
# Does the instruction perform a RIP-relative LEA into the target?
if cur_instr.opcode == "lea" and ("rip" in cur_instr.op2 or "eip" in cur_instr.op2):
return True
# Does the instruction load a string or a value of an input port into the target?
if cur_instr.opcode.startswith("lods") or cur_instr.opcode == "in":
return True
# Does the instruction overwrite the target with a static value or segment register value?
if "mov" in cur_instr.opcode and (Instruction.is_constant(cur_instr.op2) or
Instruction.get_operand_register_family(cur_instr.op2) is None):
return True
return False
def has_invalid_int_handler(self):
"""
        :return boolean: Returns True if the gadget ends in an interrupt whose operand is not 0x80 (the Linux
                         system call handler), False otherwise.
"""
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("int") and last_instr.op1 != "0x80":
return True
return False
def is_rip_relative_indirect_branch(self):
"""
:return boolean: Returns True if the gadget is a JOP/COP gadget relying on a RIP relative indirect branch,
False otherwise.
"""
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
if "rip" in last_instr.op1 or "eip" in last_instr.op1:
return True
return False
    def contains_static_call(self):
        """
        :return boolean: Returns True if an intermediate instruction is a call to a constant location, False otherwise.
        """
        for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("call") and Instruction.is_constant(cur_instr.op1):
return True
return False
def is_equal(self, rhs):
"""
:return boolean: Returns True if the gadgets are an exact match, including offset. Used for gadget locality.
"""
return self.offset == rhs.offset and self.instruction_string == rhs.instruction_string
# MASKED: is_duplicate function (lines 291-303)
def is_JOP_COP_dispatcher(self):
"""
:return boolean: Returns True if the gadget is a JOP or COP dispatcher. Defined as a gadget that begins with a
arithmetic operation on a register and ends with a branch to a deference of that register. Used
to iterate through instructions in payload. Only restrictions on the arithmetic operation is
that it doesn't use the same register as both operands.
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
# Only consider gadgets that end in dereference of a register and start with opcodes of interest
if "[" in last_instr.op1 and \
first_instr.opcode in ["inc", "dec", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(last_instr.op1)
arith_target_1 = Instruction.get_operand_register_family(first_instr.op1)
# Secondary check: if the second op is a constant ensure it is in range [1, 32]
if Instruction.is_constant(first_instr.op2):
additive_value = Instruction.get_operand_as_constant(first_instr.op2)
if additive_value < 1 or additive_value > 32:
return False
arith_target_2 = Instruction.get_operand_register_family(first_instr.op2)
return gpi_target == arith_target_1 and arith_target_1 != arith_target_2
return False
def is_JOP_COP_dataloader(self):
"""
:return boolean: Returns True if the gadget is a JOP or COP data loader. Defined as a gadget that begins with a
pop opcode to a non-memory location, that is also not the target of the GPI. Used to pop a
necessary value off stack en masse before redirecting to the dispatcher.
"""
first_instr = self.instructions[0]
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(self.instructions[len(self.instructions) - 1].op1)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target != pop_target
return False
def is_JOP_initializer(self):
"""
:return boolean: Returns True if the gadget is a JOP Initializer. Defined as a gadget that begins with a
"pop all" opcode, used to pop necessary values off stack en masse before redirecting to the
dispatcher.
"""
return self.instructions[0].opcode.startswith("popa")
def is_JOP_trampoline(self):
"""
:return boolean: Returns True if the gadget is a JOP trampoline. Defined as a gadget that begins with a
pop opcode to a non-memory location, and that ends in a dereference of that value. Used to
redirect execution to value stored in memory.
"""
first_instr = self.instructions[0]
gpi_target_op = self.instructions[len(self.instructions) - 1].op1
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(gpi_target_op)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target == pop_target and "[" in gpi_target_op
return False
def is_COP_initializer(self):
"""
:return boolean: Returns True if the gadget is a COP initializer. Defined as a gadget that begins with a
"pop all" opcode, does not use register bx/cx/dx/di as the call target, and does not clobber
bx/cx/dx or the call target in an intermediate instruction
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions)-1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
if first_instr.opcode.startswith("popa") and call_target not in [1, 2, 3, 5]: # BX, CX, DX, DI families
# Build collective list of register families to protect from being clobbered
protected_families = [1, 2, 3, call_target]
protected_registers = []
for family in protected_families:
for register in Instruction.register_families[family]:
protected_registers.append(register)
# Scan intermediate instructions to ensure they do not clobber a protected register
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Check for non-static modification of the register family
if cur_instr.op1 in protected_registers:
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return False
return True
return False
def is_COP_strong_trampoline(self):
"""
:return boolean: Returns True if the gadget is a COP strong trampoline. Defined as a gadget that begins with a
pop opcode, and contains at least one other pop operation. The last non-pop all operation must
target the call target.
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
# Only consider instructions that start with a pop
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
cnt_pops = 1
last_pop_target = first_instr.op1
# Scan intermediate instructions for pops
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("popa"):
cnt_pops += 1
if cur_instr.opcode == "pop" and "[" not in cur_instr.op1:
cnt_pops += 1
last_pop_target = cur_instr.op1
# Check that at least two pops occurred and the last pop target is the call target
if cnt_pops > 1 and last_pop_target in Instruction.register_families[call_target]:
return True
return False
def is_COP_intrastack_pivot(self):
"""
:return boolean: Returns True if the gadget is a COP Intra-stack pivot gadget. Defined as a gadget that begins
with an additive operation on the stack pointer register. Used to move around in shellcode
during COP exploits. Only restriction on the arithmetic operation is that the second operand
is not a pointer.
"""
first_instr = self.instructions[0]
if first_instr.opcode in ["inc", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
arith_target = Instruction.get_operand_register_family(first_instr.op1)
if arith_target == 7: # RSP, ESP family number
if first_instr.op2 is None or "[" not in first_instr.op2:
return True
return False
def check_contains_leave(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate "leave" instruction.
"""
for i in range(1, len(self.instructions)-1):
if self.instructions[i].opcode == "leave":
self.score += 2.0
return # Only penalize gadget once
def check_sp_target_of_operation(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain
operations on the stack pointer register family.
"""
# Scan instructions to determine if they modify the stack pointer register family
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
            # Increase score by 4 for move/load-address/exchange ops, 3 for shift/rotate ops, 1 for pops, and 2 for others
if Instruction.get_operand_register_family(cur_instr.op1) == 7: # RSP, ESP family number
if "xchg" in cur_instr.opcode or "mov" in cur_instr.opcode or cur_instr.opcode in ["lea"]:
self.score += 4.0
elif cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
elif cur_instr.opcode == "pop":
self.score += 1.0
else:
self.score += 2.0 # Will be a static modification, otherwise it would have been rejected earlier
def check_negative_sp_offsets(self):
"""
        :return void: Increases gadget's score if its cumulative stack pointer offset is negative.
"""
sp_offset = 0
# Scan instructions to determine if they modify the stack pointer
for i in range(len(self.instructions)):
cur_instr = self.instructions[i]
if cur_instr.opcode == "push":
sp_offset -= 8
elif cur_instr.opcode == "pop" and cur_instr.op1 not in Instruction.register_families[7]:
sp_offset += 8
elif cur_instr.opcode in ["add", "adc"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset += Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode in ["sub", "sbb"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset -= Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode == "inc" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset += 1
elif cur_instr.opcode == "dec" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset -= 1
elif cur_instr.opcode.startswith("ret") and cur_instr.op1 is not None:
sp_offset += Instruction.get_operand_as_constant(cur_instr.op1)
if sp_offset < 0:
self.score += 2.0
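    # Worked example of the offset arithmetic above (hypothetical gadget):
    #   "push rax ; pop rbx ; sub rsp, 0x10 ; ret"
    #   push -> -8, pop (non-rsp target) -> +8, sub rsp, 0x10 -> -16
    #   => sp_offset == -16 < 0, so the score is increased by 2.0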
def check_contains_conditional_op(self):
"""
:return void: Increases gadget's score if it contains conditional instructions like jumps, sets, and moves.
"""
# Scan instructions to determine if they modify the stack pointer
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("j") and cur_instr.opcode != "jmp":
self.score += 3.0
elif "cmov" in cur_instr.opcode or "cmpxchg" in cur_instr.opcode:
self.score += 2.0
elif "set" in cur_instr.opcode:
self.score += 1.0
def check_register_ops(self):
"""
:return void: Increases gadget's score if it contains operations on a value carrying or a bystander register
"""
first_instr = self.instructions[0]
# Check if the first instruction creates a value or is an xchg operand (excluded as an edge case)
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
first_family = None
else:
# Check op1 to find the register family to protect
first_family = Instruction.get_operand_register_family(first_instr.op1)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# If the new value is a modification of the value-carrying register
if first_family is not None and first_family == Instruction.get_operand_register_family(cur_instr.op1):
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 1.5
else:
self.score += 1.0 # Will be a static modification, otherwise it would have been rejected earlier
elif "xchg" not in cur_instr.opcode and cur_instr.opcode != "pop":
# The modification is to a "bystander register". static mods +0.5, non-static +1.0
if cur_instr.op2 is not None and Instruction.get_operand_register_family(cur_instr.op2) is not None:
self.score += 1.0
else:
self.score += 0.5
def check_branch_target_of_operation(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain
operations on the indirect branch target register family.
"""
last_instr = self.instructions[len(self.instructions)-1]
target_family = Instruction.get_operand_register_family(last_instr.op1)
# Scan instructions to determine if they modify the target register family
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Increase score by 3 for shift/rotate ops, and 2 for others
if Instruction.get_operand_register_family(cur_instr.op1) == target_family:
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
else: # All other modifications to target register
self.score += 2.0
def check_memory_writes(self):
"""
:return void: Increases gadget's score if the gadget has an instruction that writes to memory.
"""
# Iterate through instructions except GPI
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
            # Have to check both operands for xchg instructions
if "xchg" in cur_instr.opcode and ("[" in cur_instr.op1 or "[" in cur_instr.op2):
self.score += 1.0
elif cur_instr.op1 is not None and "[" in cur_instr.op1:
self.score += 1.0
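# Editor's usage sketch (illustrative; assumes a ROPgadget-style raw line and
# that this class is importable from the static_analyzer package):
#
#   gadget = Gadget("0x000000000040062a: pop rdi ; ret")
#   gadget.offset             # "0x000000000040062a"
#   len(gadget.instructions)  # 2
#   gadget.is_gpi_only()      # False: two instructions, and the first is a pop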
|
def is_duplicate(self, rhs):
"""
:return boolean: Returns True if the gadgets are a semantic match. Used for non-locality gadget metrics.
Semantic match is defined as the exact same sequence of equivalent instructions.
"""
if len(self.instructions) != len(rhs.instructions):
return False
for i in range(len(self.instructions)):
if not self.instructions[i].is_equivalent(rhs.instructions[i]):
return False
return True
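# Editor's sketch of how this predicate might be used (hypothetical gadgets;
# assumes Instruction.is_equivalent compares opcode and operand semantics):
#
#   g1 = Gadget("0x1000: pop rdi ; ret")
#   g2 = Gadget("0x2000: pop rdi ; ret")
#   g1.is_equal(g2)      # False: offsets differ
#   g1.is_duplicate(g2)  # True: same sequence of equivalent instructions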
| 291
| 303
|
"""
Gadget class
"""
# Standard Library Imports
# Third Party Imports
# Local Imports
from static_analyzer.Instruction import Instruction
class Gadget(object):
"""
The Gadget class represents a single gadget.
"""
def __init__(self, raw_gadget):
"""
Gadget constructor
:param str raw_gadget: raw line output from ROPgadget
"""
# Parse the raw line
self.offset = raw_gadget[:raw_gadget.find(":")]
self.instruction_string = raw_gadget[raw_gadget.find(":") + 2:]
# Parse instruction objects
self.instructions = []
for instr in self.instruction_string.split(" ; "):
self.instructions.append(Instruction(instr))
# Initialize score
self.score = 0.0
def is_useless_op(self):
"""
:return boolean: Returns True if the first instruction opcode is in the "useless" list, False otherwise
Default behavior is to consider opcodes useful unless otherwise observed.
"""
first_opcode = self.instructions[0].opcode
# Bulk catch for all "jump" opcodes: No reason to include the instruction, just use the suffix directly
if first_opcode.startswith("j"):
return True
# Bulk catch for bounds checked jumps, same reason as above
if first_opcode.startswith("bnd"):
return True
# Bulk catch for all "ret" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("ret"):
return True
# Bulk catch for all "iret" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("iret"):
return True
# Bulk catch for all "call" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("call"):
return True
# Useless opcodes:
# NOP - No reason to include the instruction, just use the suffix directly
# LJMP - Same reason as "jump" opcodes above
useless = ["nop", "fnop", "ljmp"]
return first_opcode in useless
def contains_unusable_op(self):
"""
:return boolean: Returns True if any instruction opcode is unusable. False otherwise
unusable instructions are Ring-0 opcodes that trap in user mode and some other exceptional ops.
"""
for instr in self.instructions:
# Bulk catch for all "invalidate" opcodes: Ring-0 instructions
if instr.opcode.startswith("inv"):
return True
# Bulk catch for all "Virtual-Machine" opcodes: Ring-0 instructions
if instr.opcode.startswith("vm") and instr.opcode != "vminsd" and instr.opcode != "vminpd":
return True
# Bulk catch for all "undefined" opcodes
if instr.opcode.startswith("ud"):
return True
# Other Ring-0 opcodes and RSM, LOCK prefix
unusable = ["clts", "hlt", "lgdt", "lidt", "lldt", "lmsw", "ltr", "monitor", "mwait",
"swapgs", "sysexit", "sysreturn", "wbinvd", "wrmsr", "xsetbv", "rsm", "lock"]
if instr.opcode in unusable:
return True
# Check for ring-0 operands (control, debug, and test registers)
if instr.op1 is not None:
if instr.op1.startswith("cr") or instr.op1.startswith("tr") or instr.op1.startswith("db"):
return True
if instr.op2 is not None:
if instr.op2.startswith("cr") or instr.op2.startswith("tr") or instr.op2.startswith("db"):
return True
return False
def is_gpi_only(self):
"""
:return boolean: Returns True if the gadget is a single instruction and starts with 'ret', 'jmp', or 'call',
False otherwise
"""
if len(self.instructions) == 1:
opcode = self.instructions[0].opcode
if opcode.startswith("ret") or opcode.startswith("jmp") or opcode.startswith("call"):
return True
return False
def is_invalid_branch(self):
"""
:return boolean: Returns True if the gadget is 'jmp' or 'call' ending and the call target is a constant offset
or does not target a recognized register family. False otherwise
"""
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("call") or last_instr.opcode.startswith("jmp"):
if Instruction.get_operand_register_family(last_instr.op1) is None:
return True
return False
def has_invalid_ret_offset(self):
"""
:return boolean: Returns True if the gadget is 'ret' ending and contains a constant offset that is odd
(not word aligned) or is greater than 32 bytes, False otherwise
"""
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("ret") and last_instr.op1 is not None:
offset = Instruction.get_operand_as_constant(last_instr.op1)
if (offset % 2 != 0) or (offset > 32):
return True
return False
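# Illustrative examples (assumed, not from the original source): "ret 0x3" is
# rejected because the offset is odd, "ret 0x40" is rejected because 64 > 32,
# and "ret 0x10" passes both checks.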
def clobbers_created_value(self):
"""
:return boolean: Returns True if the gadget completely overwrites the value created in the first instruction,
False otherwise.
"""
first_instr = self.instructions[0]
# Check if the first instruction creates a value or is an xchg operand (excluded as an edge case)
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
return False
# Check op1 to find the register family to protect
first_family = Instruction.get_operand_register_family(first_instr.op1)
# Most likely means first operand is a constant, exclude from analysis
if first_family is None:
return False
# Iterate through intermediate instructions, determine if it overwrites protected value (or part of it)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value() or "xchg" in cur_instr.opcode:
continue
# Check for non-static modification of the register family
if first_family == Instruction.get_operand_register_family(cur_instr.op1):
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
def creates_unusable_value(self):
"""
:return boolean: Returns True if the gadget creates a value in a segment or extension register, in a
RIP-relative location, or in a constant memory location; False otherwise.
"""
# Check if the first instruction creates a value (or may potentially set a flag)
first_instr = self.instructions[0]
if first_instr.opcode in ["cmp", "test", "push"] or first_instr.op1 is None:
return False
# Check if first operand is not a constant and it does not belong to a recognized register family
if not Instruction.is_constant(first_instr.op1) and \
Instruction.get_operand_register_family(first_instr.op1) is None:
return True
return False
def contains_intermediate_GPI(self):
"""
:return boolean: Returns True if the gadget's intermediate instructions contain a GPI (or a generic interrupt),
False otherwise.
"""
for i in range(len(self.instructions)-1):
cur_opcode = self.instructions[i].opcode
cur_target = self.instructions[i].op1
if cur_opcode.startswith("ret") or \
cur_opcode == "syscall" or cur_opcode == "sysenter" or cur_opcode.startswith("int") or \
("jmp" in cur_opcode and not Instruction.is_constant(cur_target)) or \
("call" in cur_opcode and not Instruction.is_constant(cur_target)):
return True
return False
def clobbers_stack_pointer(self):
"""
:return boolean: Returns True if the ROP gadget's instructions assign a non-static value to the stack pointer
register, False otherwise.
"""
# Only check ROP gadgets
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("ret"):
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Check for non-static modification of the stack pointer register family
if Instruction.get_operand_register_family(cur_instr.op1) == 7: # RSP, ESP family number
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "pop"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
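# Illustrative example (assumed): "mov rsp, rax ; ret" assigns a non-static
# value to the stack pointer and is flagged, whereas "add rsp, 8 ; ret" is a
# static (constant) adjustment and is not.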
def clobbers_indirect_target(self):
"""
:return boolean: Returns True if the JOP/COP gadget's instructions modify the indirect branch register in
certain ways, False otherwise.
"""
# Get the register family of the indirect jump / call
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
family = Instruction.get_operand_register_family(last_instr.op1)
# Check each instruction to see if it clobbers the value
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# First check if the instruction modifies the target
if cur_instr.op1 in Instruction.register_families[family]:
# Does the instruction zero out the target?
if cur_instr.opcode == "xor" and cur_instr.op1 == cur_instr.op2:
return True
# Does the instruction perform a RIP-relative LEA into the target?
if cur_instr.opcode == "lea" and ("rip" in cur_instr.op2 or "eip" in cur_instr.op2):
return True
# Does the instruction load a string or a value of an input port into the target?
if cur_instr.opcode.startswith("lods") or cur_instr.opcode == "in":
return True
# Does the instruction overwrite the target with a static value or segment register value?
if "mov" in cur_instr.opcode and (Instruction.is_constant(cur_instr.op2) or
Instruction.get_operand_register_family(cur_instr.op2) is None):
return True
return False
def has_invalid_int_handler(self):
"""
:return boolean: Returns True if the gadget ends in a software interrupt whose handler is not the
syscall handler (int 0x80), False otherwise.
"""
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("int") and last_instr.op1 != "0x80":
return True
return False
def is_rip_relative_indirect_branch(self):
"""
:return boolean: Returns True if the gadget is a JOP/COP gadget relying on a RIP relative indirect branch,
False otherwise.
"""
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
if "rip" in last_instr.op1 or "eip" in last_instr.op1:
return True
return False
def contains_static_call(self):
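"""
:return boolean: Returns True if an intermediate instruction is a 'call' to a constant target, False otherwise.
"""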
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("call") and Instruction.is_constant(cur_instr.op1):
return True
return False
def is_equal(self, rhs):
"""
:return boolean: Returns True if the gadgets are an exact match, including offset. Used for gadget locality.
"""
return self.offset == rhs.offset and self.instruction_string == rhs.instruction_string
def is_duplicate(self, rhs):
"""
:return boolean: Returns True if the gadgets are a semantic match. Used for non-locality gadget metrics.
Semantic match is defined as the exact same sequence of equivalent instructions.
"""
if len(self.instructions) != len(rhs.instructions):
return False
for i in range(len(self.instructions)):
if not self.instructions[i].is_equivalent(rhs.instructions[i]):
return False
return True
def is_JOP_COP_dispatcher(self):
"""
:return boolean: Returns True if the gadget is a JOP or COP dispatcher. Defined as a gadget that begins with an
arithmetic operation on a register and ends with a branch to a dereference of that register. Used
to iterate through instructions in a payload. The only restriction on the arithmetic operation is
that it doesn't use the same register as both operands.
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
# Only consider gadgets that end in dereference of a register and start with opcodes of interest
if "[" in last_instr.op1 and \
first_instr.opcode in ["inc", "dec", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(last_instr.op1)
arith_target_1 = Instruction.get_operand_register_family(first_instr.op1)
# Secondary check: if the second op is a constant ensure it is in range [1, 32]
if Instruction.is_constant(first_instr.op2):
additive_value = Instruction.get_operand_as_constant(first_instr.op2)
if additive_value < 1 or additive_value > 32:
return False
arith_target_2 = Instruction.get_operand_register_family(first_instr.op2)
return gpi_target == arith_target_1 and arith_target_1 != arith_target_2
return False
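# Illustrative example (assumed): "add rax, 8 ; jmp [rax]" qualifies as a
# dispatcher (arithmetic on rax, branch to a dereference of rax), while
# "add rax, rax ; jmp [rax]" does not, because both operands share a family.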
def is_JOP_COP_dataloader(self):
"""
:return boolean: Returns True if the gadget is a JOP or COP data loader. Defined as a gadget that begins with a
pop opcode to a non-memory location that is also not the target of the GPI. Used to pop a
necessary value off the stack before redirecting to the dispatcher.
"""
first_instr = self.instructions[0]
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(self.instructions[len(self.instructions) - 1].op1)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target != pop_target
return False
def is_JOP_initializer(self):
"""
:return boolean: Returns True if the gadget is a JOP Initializer. Defined as a gadget that begins with a
"pop all" opcode, used to pop necessary values off stack en masse before redirecting to the
dispatcher.
"""
return self.instructions[0].opcode.startswith("popa")
def is_JOP_trampoline(self):
"""
:return boolean: Returns True if the gadget is a JOP trampoline. Defined as a gadget that begins with a
pop opcode to a non-memory location, and that ends in a dereference of that value. Used to
redirect execution to a value stored in memory.
"""
first_instr = self.instructions[0]
gpi_target_op = self.instructions[len(self.instructions) - 1].op1
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(gpi_target_op)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target == pop_target and "[" in gpi_target_op
return False
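# Illustrative example (assumed): "pop rax ; jmp [rax]" is a trampoline (the
# popped register is dereferenced by the GPI), while "pop rax ; jmp rax" is
# not, because the branch target is not a memory dereference.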
def is_COP_initializer(self):
"""
:return boolean: Returns True if the gadget is a COP initializer. Defined as a gadget that begins with a
"pop all" opcode, does not use register bx/cx/dx/di as the call target, and does not clobber
bx/cx/dx or the call target in an intermediate instruction
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions)-1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
if first_instr.opcode.startswith("popa") and call_target not in [1, 2, 3, 5]: # BX, CX, DX, DI families
# Build collective list of register families to protect from being clobbered
protected_families = [1, 2, 3, call_target]
protected_registers = []
for family in protected_families:
for register in Instruction.register_families[family]:
protected_registers.append(register)
# Scan intermediate instructions to ensure they do not clobber a protected register
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Check for non-static modification of the register family
if cur_instr.op1 in protected_registers:
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return False
return True
return False
def is_COP_strong_trampoline(self):
"""
:return boolean: Returns True if the gadget is a COP strong trampoline. Defined as a gadget that begins with a
pop opcode, and contains at least one other pop operation. The last non-popa pop operation must
target the call target.
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
# Only consider instructions that start with a pop
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
cnt_pops = 1
last_pop_target = first_instr.op1
# Scan intermediate instructions for pops
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("popa"):
cnt_pops += 1
if cur_instr.opcode == "pop" and "[" not in cur_instr.op1:
cnt_pops += 1
last_pop_target = cur_instr.op1
# Check that at least two pops occurred and the last pop target is the call target
if cnt_pops > 1 and last_pop_target in Instruction.register_families[call_target]:
return True
return False
def is_COP_intrastack_pivot(self):
"""
:return boolean: Returns True if the gadget is a COP Intra-stack pivot gadget. Defined as a gadget that begins
with an additive operation on the stack pointer register. Used to move around in shellcode
during COP exploits. The only restriction on the arithmetic operation is that the second operand
is not a pointer.
"""
first_instr = self.instructions[0]
if first_instr.opcode in ["inc", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
arith_target = Instruction.get_operand_register_family(first_instr.op1)
if arith_target == 7: # RSP, ESP family number
if first_instr.op2 is None or "[" not in first_instr.op2:
return True
return False
def check_contains_leave(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate "leave" instruction.
"""
for i in range(1, len(self.instructions)-1):
if self.instructions[i].opcode == "leave":
self.score += 2.0
return # Only penalize gadget once
def check_sp_target_of_operation(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain
operations on the stack pointer register family.
"""
# Scan instructions to determine if they modify the stack pointer register family
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Increase score by 4 for move, load address, and exchange ops, 3 for shift/rotate ops, 1 for pops, and 2 for others
if Instruction.get_operand_register_family(cur_instr.op1) == 7: # RSP, ESP family number
if "xchg" in cur_instr.opcode or "mov" in cur_instr.opcode or cur_instr.opcode in ["lea"]:
self.score += 4.0
elif cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
elif cur_instr.opcode == "pop":
self.score += 1.0
else:
self.score += 2.0 # Will be a static modification, otherwise it would have been rejected earlier
def check_negative_sp_offsets(self):
"""
:return void: Increases gadget's score if its cumulative stack pointer offset is negative.
"""
sp_offset = 0
# Scan instructions to determine if they modify the stack pointer
for i in range(len(self.instructions)):
cur_instr = self.instructions[i]
if cur_instr.opcode == "push":
sp_offset -= 8
elif cur_instr.opcode == "pop" and cur_instr.op1 not in Instruction.register_families[7]:
sp_offset += 8
elif cur_instr.opcode in ["add", "adc"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset += Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode in ["sub", "sbb"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset -= Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode == "inc" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset += 1
elif cur_instr.opcode == "dec" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset -= 1
elif cur_instr.opcode.startswith("ret") and cur_instr.op1 is not None:
sp_offset += Instruction.get_operand_as_constant(cur_instr.op1)
if sp_offset < 0:
self.score += 2.0
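# Illustrative walk-through (assumed): for "pop rax ; push rbx ; push rcx ; ret",
# the cumulative offset is +8 - 8 - 8 = -8, which is negative, so the score
# increases by 2.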
def check_contains_conditional_op(self):
"""
:return void: Increases gadget's score if it contains conditional instructions like jumps, sets, and moves.
"""
# Scan instructions to determine if they contain conditional operations
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("j") and cur_instr.opcode != "jmp":
self.score += 3.0
elif "cmov" in cur_instr.opcode or "cmpxchg" in cur_instr.opcode:
self.score += 2.0
elif "set" in cur_instr.opcode:
self.score += 1.0
def check_register_ops(self):
"""
:return void: Increases gadget's score if it contains operations on a value-carrying or a bystander register.
"""
first_instr = self.instructions[0]
# Check if the first instruction creates a value or is an xchg operand (excluded as an edge case)
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
first_family = None
else:
# Check op1 to find the register family to protect
first_family = Instruction.get_operand_register_family(first_instr.op1)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# If the new value is a modification of the value-carrying register
if first_family is not None and first_family == Instruction.get_operand_register_family(cur_instr.op1):
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 1.5
else:
self.score += 1.0 # Will be a static modification, otherwise it would have been rejected earlier
elif "xchg" not in cur_instr.opcode and cur_instr.opcode != "pop":
# The modification is to a "bystander register". static mods +0.5, non-static +1.0
if cur_instr.op2 is not None and Instruction.get_operand_register_family(cur_instr.op2) is not None:
self.score += 1.0
else:
self.score += 0.5
def check_branch_target_of_operation(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain
operations on the indirect branch target register family.
"""
last_instr = self.instructions[len(self.instructions)-1]
target_family = Instruction.get_operand_register_family(last_instr.op1)
# Scan instructions to determine if they modify the target register family
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Increase score by 3 for shift/rotate ops, and 2 for others
if Instruction.get_operand_register_family(cur_instr.op1) == target_family:
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
else: # All other modifications to target register
self.score += 2.0
def check_memory_writes(self):
"""
:return void: Increases gadget's score if the gadget has an instruction that writes to memory.
"""
# Iterate through instructions except GPI
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Have to check both operands for xchg instructions
if "xchg" in cur_instr.opcode and ("[" in cur_instr.op1 or "[" in cur_instr.op2):
self.score += 1.0
elif cur_instr.op1 is not None and "[" in cur_instr.op1:
self.score += 1.0
|
is_JOP_COP_dispatcher
|
:return boolean: Returns True if the gadget is a JOP or COP dispatcher. Defined as a gadget that begins with an
arithmetic operation on a register and ends with a branch to a dereference of that register. Used
to iterate through instructions in a payload. The only restriction on the arithmetic operation is
that it doesn't use the same register as both operands.
|
"""
Gadget class
"""
# Standard Library Imports
# Third Party Imports
# Local Imports
from static_analyzer.Instruction import Instruction
class Gadget(object):
"""
The Gadget class represents a single gadget.
"""
def __init__(self, raw_gadget):
"""
Gadget constructor
:param str raw_gadget: raw line output from ROPgadget
"""
# Parse the raw line
self.offset = raw_gadget[:raw_gadget.find(":")]
self.instruction_string = raw_gadget[raw_gadget.find(":") + 2:]
# Parse instruction objects
self.instructions = []
for instr in self.instruction_string.split(" ; "):
self.instructions.append(Instruction(instr))
# Initialize score
self.score = 0.0
def is_useless_op(self):
"""
:return boolean: Returns True if the first instruction opcode is in the "useless" list, False otherwise
Default behavior is to consider opcodes useful unless otherwise observed.
"""
first_opcode = self.instructions[0].opcode
# Bulk catch for all "jump" opcodes: No reason to include the instruction, just use the suffix directly
if first_opcode.startswith("j"):
return True
# Bulk catch for bounds checked jumps, same reason as above
if first_opcode.startswith("bnd"):
return True
# Bulk catch for all "ret" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("ret"):
return True
# Bulk catch for all "iret" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("iret"):
return True
# Bulk catch for all "call" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("call"):
return True
# Useless opcodes:
# NOP - No reason to include the instruction, just use the suffix directly
# LJMP - Same reason as "jump" opcodes above
useless = ["nop", "fnop", "ljmp"]
return first_opcode in useless
def contains_unusable_op(self):
"""
:return boolean: Returns True if any instruction opcode is unusable. False otherwise
unusable instructions are Ring-0 opcodes that trap in user mode and some other exceptional ops.
"""
for instr in self.instructions:
# Bulk catch for all "invalidate" opcodes: Ring-0 instructions
if instr.opcode.startswith("inv"):
return True
# Bulk catch for all "Virtual-Machine" opcodes: Ring-0 instructions
if instr.opcode.startswith("vm") and instr.opcode != "vminsd" and instr.opcode != "vminpd":
return True
# Bulk catch for all "undefined" opcodes
if instr.opcode.startswith("ud"):
return True
# Other Ring-0 opcodes and RSM, LOCK prefix
unusable = ["clts", "hlt", "lgdt", "lidt", "lldt", "lmsw", "ltr", "monitor", "mwait",
"swapgs", "sysexit", "sysreturn", "wbinvd", "wrmsr", "xsetbv", "rsm", "lock"]
if instr.opcode in unusable:
return True
# Check for ring-0 operands (control, debug, and test registers)
if instr.op1 is not None:
if instr.op1.startswith("cr") or instr.op1.startswith("tr") or instr.op1.startswith("db"):
return True
if instr.op2 is not None:
if instr.op2.startswith("cr") or instr.op2.startswith("tr") or instr.op2.startswith("db"):
return True
return False
def is_gpi_only(self):
"""
:return boolean: Returns True if the gadget is a single instruction and starts with 'ret', 'jmp', or 'call',
False otherwise
"""
if len(self.instructions) == 1:
opcode = self.instructions[0].opcode
if opcode.startswith("ret") or opcode.startswith("jmp") or opcode.startswith("call"):
return True
return False
def is_invalid_branch(self):
"""
:return boolean: Returns True if the gadget is 'jmp' or 'call' ending and the call target is a constant offset
or does not target a recognized register family. False otherwise
"""
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("call") or last_instr.opcode.startswith("jmp"):
if Instruction.get_operand_register_family(last_instr.op1) is None:
return True
return False
def has_invalid_ret_offset(self):
"""
:return boolean: Returns True if the gadget is 'ret' ending and contains a constant offset that is odd
(not word aligned) or is greater than 32 bytes, False otherwise
"""
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("ret") and last_instr.op1 is not None:
offset = Instruction.get_operand_as_constant(last_instr.op1)
if (offset % 2 != 0) or (offset > 32):
return True
return False
def clobbers_created_value(self):
"""
:return boolean: Returns True if the gadget completely overwrites the value created in the first instruction,
False otherwise.
"""
first_instr = self.instructions[0]
# Check if the first instruction creates a value or is an xchg operand (excluded as an edge case)
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
return False
# Check op1 to find the register family to protect
first_family = Instruction.get_operand_register_family(first_instr.op1)
# Most likely means first operand is a constant, exclude from analysis
if first_family is None:
return False
# Iterate through intermediate instructions, determine if it overwrites protected value (or part of it)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value() or "xchg" in cur_instr.opcode:
continue
# Check for non-static modification of the register family
if first_family == Instruction.get_operand_register_family(cur_instr.op1):
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
def creates_unusable_value(self):
"""
:return boolean: Returns True if the gadget creates a value in a segment or extension register, in a
RIP-relative location, or in a constant memory location; False otherwise.
"""
# Check if the first instruction creates a value (or may potentially set a flag)
first_instr = self.instructions[0]
if first_instr.opcode in ["cmp", "test", "push"] or first_instr.op1 is None:
return False
# Check if first operand is not a constant and it does not belong to a recognized register family
if not Instruction.is_constant(first_instr.op1) and \
Instruction.get_operand_register_family(first_instr.op1) is None:
return True
return False
def contains_intermediate_GPI(self):
"""
:return boolean: Returns True if the gadget's intermediate instructions contain a GPI (or a generic interrupt),
False otherwise.
"""
for i in range(len(self.instructions)-1):
cur_opcode = self.instructions[i].opcode
cur_target = self.instructions[i].op1
if cur_opcode.startswith("ret") or \
cur_opcode == "syscall" or cur_opcode == "sysenter" or cur_opcode.startswith("int") or \
("jmp" in cur_opcode and not Instruction.is_constant(cur_target)) or \
("call" in cur_opcode and not Instruction.is_constant(cur_target)):
return True
return False
def clobbers_stack_pointer(self):
"""
:return boolean: Returns True if the ROP gadget's instructions assign a non-static value to the stack pointer
register, False otherwise.
"""
# Only check ROP gadgets
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("ret"):
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Check for non-static modification of the stack pointer register family
if Instruction.get_operand_register_family(cur_instr.op1) == 7: # RSP, ESP family number
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "pop"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
def clobbers_indirect_target(self):
"""
:return boolean: Returns True if the JOP/COP gadget's instructions modify the indirect branch register in
certain ways, False otherwise.
"""
# Get the register family of the indirect jump / call
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
family = Instruction.get_operand_register_family(last_instr.op1)
# Check each instruction to see if it clobbers the value
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# First check if the instruction modifies the target
if cur_instr.op1 in Instruction.register_families[family]:
# Does the instruction zero out the target?
if cur_instr.opcode == "xor" and cur_instr.op1 == cur_instr.op2:
return True
# Does the instruction perform a RIP-relative LEA into the target?
if cur_instr.opcode == "lea" and ("rip" in cur_instr.op2 or "eip" in cur_instr.op2):
return True
# Does the instruction load a string or a value of an input port into the target?
if cur_instr.opcode.startswith("lods") or cur_instr.opcode == "in":
return True
# Does the instruction overwrite the target with a static value or segment register value?
if "mov" in cur_instr.opcode and (Instruction.is_constant(cur_instr.op2) or
Instruction.get_operand_register_family(cur_instr.op2) is None):
return True
return False
def has_invalid_int_handler(self):
"""
:return boolean: Returns True if the gadget ends in a software interrupt whose handler is not the
syscall handler (int 0x80), False otherwise.
"""
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("int") and last_instr.op1 != "0x80":
return True
return False
def is_rip_relative_indirect_branch(self):
"""
:return boolean: Returns True if the gadget is a JOP/COP gadget relying on a RIP relative indirect branch,
False otherwise.
"""
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
if "rip" in last_instr.op1 or "eip" in last_instr.op1:
return True
return False
def contains_static_call(self):
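"""
:return boolean: Returns True if an intermediate instruction is a 'call' to a constant target, False otherwise.
"""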
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("call") and Instruction.is_constant(cur_instr.op1):
return True
return False
def is_equal(self, rhs):
"""
:return boolean: Returns True if the gadgets are an exact match, including offset. Used for gadget locality.
"""
return self.offset == rhs.offset and self.instruction_string == rhs.instruction_string
def is_duplicate(self, rhs):
"""
:return boolean: Returns True if the gadgets are a semantic match. Used for non-locality gadget metrics.
Semantic match is defined as the exact same sequence of equivalent instructions.
"""
if len(self.instructions) != len(rhs.instructions):
return False
for i in range(len(self.instructions)):
if not self.instructions[i].is_equivalent(rhs.instructions[i]):
return False
return True
# MASKED: is_JOP_COP_dispatcher function (lines 305-330)
def is_JOP_COP_dataloader(self):
"""
:return boolean: Returns True if the gadget is a JOP or COP data loader. Defined as a gadget that begins with a
pop opcode to a non-memory location that is also not the target of the GPI. Used to pop a
necessary value off the stack before redirecting to the dispatcher.
"""
first_instr = self.instructions[0]
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(self.instructions[len(self.instructions) - 1].op1)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target != pop_target
return False
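# Illustrative example (assumed): for "pop rax ; jmp rbx", the pop target (rax)
# differs from the GPI target (rbx), so this is a data loader; "pop rbx ; jmp rbx"
# is not, since the pop would clobber the branch target.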
def is_JOP_initializer(self):
"""
:return boolean: Returns True if the gadget is a JOP Initializer. Defined as a gadget that begins with a
"pop all" opcode, used to pop necessary values off stack en masse before redirecting to the
dispatcher.
"""
return self.instructions[0].opcode.startswith("popa")
def is_JOP_trampoline(self):
"""
:return boolean: Returns True if the gadget is a JOP trampoline. Defined as a gadget that begins with a
pop opcode to a non-memory location, and that ends in a dereference of that value. Used to
redirect execution to a value stored in memory.
"""
first_instr = self.instructions[0]
gpi_target_op = self.instructions[len(self.instructions) - 1].op1
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(gpi_target_op)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target == pop_target and "[" in gpi_target_op
return False
def is_COP_initializer(self):
"""
:return boolean: Returns True if the gadget is a COP initializer. Defined as a gadget that begins with a
"pop all" opcode, does not use register bx/cx/dx/di as the call target, and does not clobber
bx/cx/dx or the call target in an intermediate instruction
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions)-1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
if first_instr.opcode.startswith("popa") and call_target not in [1, 2, 3, 5]: # BX, CX, DX, DI families
# Build collective list of register families to protect from being clobbered
protected_families = [1, 2, 3, call_target]
protected_registers = []
for family in protected_families:
for register in Instruction.register_families[family]:
protected_registers.append(register)
# Scan intermediate instructions to ensure they do not clobber a protected register
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Check for non-static modification of the register family
if cur_instr.op1 in protected_registers:
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return False
return True
return False
def is_COP_strong_trampoline(self):
"""
:return boolean: Returns True if the gadget is a COP strong trampoline. Defined as a gadget that begins with a
pop opcode, and contains at least one other pop operation. The last non-popa pop operation must
target the call target.
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
# Only consider instructions that start with a pop
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
cnt_pops = 1
last_pop_target = first_instr.op1
# Scan intermediate instructions for pops
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("popa"):
cnt_pops += 1
if cur_instr.opcode == "pop" and "[" not in cur_instr.op1:
cnt_pops += 1
last_pop_target = cur_instr.op1
# Check that at least two pops occurred and the last pop target is the call target
if cnt_pops > 1 and last_pop_target in Instruction.register_families[call_target]:
return True
return False
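# Illustrative example (assumed): "pop rdi ; pop rax ; call rax" counts two
# pops and the last pop target (rax) matches the call target, so it qualifies;
# "pop rax ; pop rdi ; call rax" fails the last-pop check.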
def is_COP_intrastack_pivot(self):
"""
:return boolean: Returns True if the gadget is a COP Intra-stack pivot gadget. Defined as a gadget that begins
with an additive operation on the stack pointer register. Used to move around in shellcode
during COP exploits. The only restriction on the arithmetic operation is that the second operand
is not a pointer.
"""
first_instr = self.instructions[0]
if first_instr.opcode in ["inc", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
arith_target = Instruction.get_operand_register_family(first_instr.op1)
if arith_target == 7: # RSP, ESP family number
if first_instr.op2 is None or "[" not in first_instr.op2:
return True
return False
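# Illustrative example (assumed): "add rsp, 0x18 ; call rax" pivots within the
# stack (an additive op on rsp with a non-pointer second operand), so it qualifies.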
def check_contains_leave(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate "leave" instruction.
"""
for i in range(1, len(self.instructions)-1):
if self.instructions[i].opcode == "leave":
self.score += 2.0
return # Only penalize gadget once
def check_sp_target_of_operation(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain
operations on the stack pointer register family.
"""
# Scan instructions to determine if they modify the stack pointer register family
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Increase score by 4 for move, load address, and exchange ops, 3 for shift/rotate ops, 1 for pops, and 2 for others
if Instruction.get_operand_register_family(cur_instr.op1) == 7: # RSP, ESP family number
if "xchg" in cur_instr.opcode or "mov" in cur_instr.opcode or cur_instr.opcode in ["lea"]:
self.score += 4.0
elif cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
elif cur_instr.opcode == "pop":
self.score += 1.0
else:
self.score += 2.0 # Will be a static modification, otherwise it would have been rejected earlier
def check_negative_sp_offsets(self):
"""
:return void: Increases gadget's score if its cumulative stack pointer offset is negative.
"""
sp_offset = 0
# Scan instructions to determine if they modify the stack pointer
for i in range(len(self.instructions)):
cur_instr = self.instructions[i]
if cur_instr.opcode == "push":
sp_offset -= 8
elif cur_instr.opcode == "pop" and cur_instr.op1 not in Instruction.register_families[7]:
sp_offset += 8
elif cur_instr.opcode in ["add", "adc"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset += Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode in ["sub", "sbb"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset -= Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode == "inc" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset += 1
elif cur_instr.opcode == "dec" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset -= 1
elif cur_instr.opcode.startswith("ret") and cur_instr.op1 is not None:
sp_offset += Instruction.get_operand_as_constant(cur_instr.op1)
if sp_offset < 0:
self.score += 2.0
def check_contains_conditional_op(self):
"""
:return void: Increases gadget's score if it contains conditional instructions like jumps, sets, and moves.
"""
# Scan instructions to determine if they contain conditional operations
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("j") and cur_instr.opcode != "jmp":
self.score += 3.0
elif "cmov" in cur_instr.opcode or "cmpxchg" in cur_instr.opcode:
self.score += 2.0
elif "set" in cur_instr.opcode:
self.score += 1.0
def check_register_ops(self):
"""
:return void: Increases gadget's score if it contains operations on a value-carrying or a bystander register.
"""
first_instr = self.instructions[0]
# Check if the first instruction creates a value or is an xchg operand (excluded as an edge case)
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
first_family = None
else:
# Check op1 to find the register family to protect
first_family = Instruction.get_operand_register_family(first_instr.op1)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# If the new value is a modification of the value-carrying register
if first_family is not None and first_family == Instruction.get_operand_register_family(cur_instr.op1):
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 1.5
else:
self.score += 1.0 # Will be a static modification, otherwise it would have been rejected earlier
elif "xchg" not in cur_instr.opcode and cur_instr.opcode != "pop":
# The modification is to a "bystander register". static mods +0.5, non-static +1.0
if cur_instr.op2 is not None and Instruction.get_operand_register_family(cur_instr.op2) is not None:
self.score += 1.0
else:
self.score += 0.5
def check_branch_target_of_operation(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain
operations on the indirect branch target register family.
"""
last_instr = self.instructions[len(self.instructions)-1]
target_family = Instruction.get_operand_register_family(last_instr.op1)
# Scan instructions to determine if they modify the target register family
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Increase score by 3 for shift/rotate ops, and 2 for others
if Instruction.get_operand_register_family(cur_instr.op1) == target_family:
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
else: # All other modifications to target register
self.score += 2.0
def check_memory_writes(self):
"""
:return void: Increases gadget's score if the gadget has an instruction that writes to memory.
"""
# Iterate through instructions except GPI
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Have to check both operands for xchg instructions
if "xchg" in cur_instr.opcode and ("[" in cur_instr.op1 or "[" in cur_instr.op2):
self.score += 1.0
elif cur_instr.op1 is not None and "[" in cur_instr.op1:
self.score += 1.0
|
def is_JOP_COP_dispatcher(self):
"""
:return boolean: Returns True if the gadget is a JOP or COP dispatcher. Defined as a gadget that begins with an
arithmetic operation on a register and ends with a branch to a dereference of that register. Used
to iterate through instructions in a payload. The only restriction on the arithmetic operation is
that it doesn't use the same register as both operands.
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
# Only consider gadgets that end in dereference of a register and start with opcodes of interest
if "[" in last_instr.op1 and \
first_instr.opcode in ["inc", "dec", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(last_instr.op1)
arith_target_1 = Instruction.get_operand_register_family(first_instr.op1)
# Secondary check: if the second op is a constant ensure it is in range [1, 32]
if Instruction.is_constant(first_instr.op2):
additive_value = Instruction.get_operand_as_constant(first_instr.op2)
if additive_value < 1 or additive_value > 32:
return False
arith_target_2 = Instruction.get_operand_register_family(first_instr.op2)
return gpi_target == arith_target_1 and arith_target_1 != arith_target_2
return False
| 305
| 330
|
"""
Gadget class
"""
# Standard Library Imports
# Third Party Imports
# Local Imports
from static_analyzer.Instruction import Instruction
class Gadget(object):
"""
The Gadget class represents a single gadget.
"""
def __init__(self, raw_gadget):
"""
Gadget constructor
:param str raw_gadget: raw line output from ROPgadget
"""
# Parse the raw line
self.offset = raw_gadget[:raw_gadget.find(":")]
self.instruction_string = raw_gadget[raw_gadget.find(":") + 2:]
# Parse instruction objects
self.instructions = []
for instr in self.instruction_string.split(" ; "):
self.instructions.append(Instruction(instr))
# Initialize score
self.score = 0.0
def is_useless_op(self):
"""
:return boolean: Returns True if the first instruction opcode is in the "useless" list, False otherwise
Default behavior is to consider opcodes useful unless otherwise observed.
"""
first_opcode = self.instructions[0].opcode
# Bulk catch for all "jump" opcodes: No reason to include the instruction, just use the suffix directly
if first_opcode.startswith("j"):
return True
# Bulk catch for bounds checked jumps, same reason as above
if first_opcode.startswith("bnd"):
return True
# Bulk catch for all "ret" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("ret"):
return True
# Bulk catch for all "iret" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("iret"):
return True
# Bulk catch for all "call" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("call"):
return True
# Useless opcodes:
# NOP - No reason to include the instruction, just use the suffix directly
# LJMP - Same reason as "jump" opcodes above
useless = ["nop", "fnop", "ljmp"]
return first_opcode in useless
def contains_unusable_op(self):
"""
:return boolean: Returns True if any instruction opcode is unusable. False otherwise
unusable instructions are Ring-0 opcodes that trap in user mode and some other exceptional ops.
"""
for instr in self.instructions:
# Bulk catch for all "invalidate" opcodes: Ring-0 instructions
if instr.opcode.startswith("inv"):
return True
# Bulk catch for all "Virtual-Machine" opcodes: Ring-0 instructions
if instr.opcode.startswith("vm") and instr.opcode != "vminsd" and instr.opcode != "vminpd":
return True
# Bulk catch for all "undefined" opcodes
if instr.opcode.startswith("ud"):
return True
# Other Ring-0 opcodes and RSM, LOCK prefix
unusable = ["clts", "hlt", "lgdt", "lidt", "lldt", "lmsw", "ltr", "monitor", "mwait",
"swapgs", "sysexit", "sysreturn", "wbinvd", "wrmsr", "xsetbv", "rsm", "lock"]
if instr.opcode in unusable:
return True
# Check for ring-0 operands (control, debug, and test registers)
if instr.op1 is not None:
if instr.op1.startswith("cr") or instr.op1.startswith("tr") or instr.op1.startswith("db"):
return True
if instr.op2 is not None:
if instr.op2.startswith("cr") or instr.op2.startswith("tr") or instr.op2.startswith("db"):
return True
return False
def is_gpi_only(self):
"""
:return boolean: Returns True if the gadget is a single instruction and starts with 'ret', 'jmp', or 'call',
False otherwise
"""
if len(self.instructions) == 1:
opcode = self.instructions[0].opcode
if opcode.startswith("ret") or opcode.startswith("jmp") or opcode.startswith("call"):
return True
return False
def is_invalid_branch(self):
"""
:return boolean: Returns True if the gadget is 'jmp' or 'call' ending and the call target is a constant offset
or does not target a recognized register family. False otherwise
"""
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("call") or last_instr.opcode.startswith("jmp"):
if Instruction.get_operand_register_family(last_instr.op1) is None:
return True
return False
def has_invalid_ret_offset(self):
"""
:return boolean: Returns True if the gadget is 'ret' ending and contains a constant offset that is odd
(not word aligned) or is greater than 32 bytes, False otherwise
"""
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("ret") and last_instr.op1 is not None:
offset = Instruction.get_operand_as_constant(last_instr.op1)
if (offset % 2 != 0) or (offset > 32):
return True
return False
def clobbers_created_value(self):
"""
:return boolean: Returns True if the gadget completely overwrites the value created in the first instruction,
False otherwise.
"""
first_instr = self.instructions[0]
# Check if the first instruction creates a value or is an xchg operand (excluded as an edge case)
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
return False
# Check op1 to find the register family to protect
first_family = Instruction.get_operand_register_family(first_instr.op1)
# Most likely means first operand is a constant, exclude from analysis
if first_family is None:
return False
# Iterate through intermediate instructions, determine if it overwrites protected value (or part of it)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value() or "xchg" in cur_instr.opcode:
continue
# Check for non-static modification of the register family
if first_family == Instruction.get_operand_register_family(cur_instr.op1):
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
def creates_unusable_value(self):
"""
:return boolean: Returns True if the gadget creates a value in a segment or extension register, in a
RIP-relative location, or in a constant memory location; False otherwise.
"""
# Check if the first instruction creates a value (or may potentially set a flag)
first_instr = self.instructions[0]
if first_instr.opcode in ["cmp", "test", "push"] or first_instr.op1 is None:
return False
# Check if first operand is not a constant and it does not belong to a recognized register family
if not Instruction.is_constant(first_instr.op1) and \
Instruction.get_operand_register_family(first_instr.op1) is None:
return True
return False
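# Illustrative example (assumed): "mov es, si ; ret" creates a value in a
# segment register, which is not a recognized general-purpose family, so it
# is flagged as unusable.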
def contains_intermediate_GPI(self):
"""
:return boolean: Returns True if the gadget's intermediate instructions contain a GPI (or a generic interrupt),
False otherwise.
"""
for i in range(len(self.instructions)-1):
cur_opcode = self.instructions[i].opcode
cur_target = self.instructions[i].op1
if cur_opcode.startswith("ret") or \
cur_opcode == "syscall" or cur_opcode == "sysenter" or cur_opcode.startswith("int") or \
("jmp" in cur_opcode and not Instruction.is_constant(cur_target)) or \
("call" in cur_opcode and not Instruction.is_constant(cur_target)):
return True
return False
def clobbers_stack_pointer(self):
"""
:return boolean: Returns True if the ROP gadget's instructions assign a non-static value to the stack pointer
register, False otherwise.
"""
# Only check ROP gadgets
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("ret"):
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Check for non-static modification of the stack pointer register family
if Instruction.get_operand_register_family(cur_instr.op1) == 7: # RSP, ESP family number
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "pop"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
def clobbers_indirect_target(self):
"""
:return boolean: Returns True if the JOP/COP gadget's instructions modify the indirect branch register in
certain ways, False otherwise.
"""
# Get the register family of the indirect jump / call
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
family = Instruction.get_operand_register_family(last_instr.op1)
# Check each instruction to see if it clobbers the value
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# First check if the instruction modifies the target
if cur_instr.op1 in Instruction.register_families[family]:
# Does the instruction zero out the target?
if cur_instr.opcode == "xor" and cur_instr.op1 == cur_instr.op2:
return True
# Does the instruction perform a RIP-relative LEA into the target?
if cur_instr.opcode == "lea" and ("rip" in cur_instr.op2 or "eip" in cur_instr.op2):
return True
# Does the instruction load a string or a value of an input port into the target?
if cur_instr.opcode.startswith("lods") or cur_instr.opcode == "in":
return True
# Does the instruction overwrite the target with a static value or segment register value?
if "mov" in cur_instr.opcode and (Instruction.is_constant(cur_instr.op2) or
Instruction.get_operand_register_family(cur_instr.op2) is None):
return True
return False
def has_invalid_int_handler(self):
"""
:return boolean: Returns True if the gadget ends in a software interrupt whose handler is not the
syscall handler (int 0x80), False otherwise.
"""
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("int") and last_instr.op1 != "0x80":
return True
return False
def is_rip_relative_indirect_branch(self):
"""
:return boolean: Returns True if the gadget is a JOP/COP gadget relying on a RIP relative indirect branch,
False otherwise.
"""
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
if "rip" in last_instr.op1 or "eip" in last_instr.op1:
return True
return False
def contains_static_call(self):
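"""
:return boolean: Returns True if an intermediate instruction is a 'call' to a constant target, False otherwise.
"""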
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("call") and Instruction.is_constant(cur_instr.op1):
return True
return False
def is_equal(self, rhs):
"""
:return boolean: Returns True if the gadgets are an exact match, including offset. Used for gadget locality.
"""
return self.offset == rhs.offset and self.instruction_string == rhs.instruction_string
def is_duplicate(self, rhs):
"""
:return boolean: Returns True if the gadgets are a semantic match. Used for non-locality gadget metrics.
Semantic match is defined as the exact same sequence of equivalent instructions.
"""
if len(self.instructions) != len(rhs.instructions):
return False
for i in range(len(self.instructions)):
if not self.instructions[i].is_equivalent(rhs.instructions[i]):
return False
return True
def is_JOP_COP_dispatcher(self):
"""
:return boolean: Returns True if the gadget is a JOP or COP dispatcher. Defined as a gadget that begins with an
arithmetic operation on a register and ends with a branch to a dereference of that register. Used
to iterate through instructions in a payload. The only restriction on the arithmetic operation is
that it doesn't use the same register as both operands.
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
# Only consider gadgets that end in dereference of a register and start with opcodes of interest
if "[" in last_instr.op1 and \
first_instr.opcode in ["inc", "dec", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(last_instr.op1)
arith_target_1 = Instruction.get_operand_register_family(first_instr.op1)
# Secondary check: if the second op is a constant ensure it is in range [1, 32]
if Instruction.is_constant(first_instr.op2):
additive_value = Instruction.get_operand_as_constant(first_instr.op2)
if additive_value < 1 or additive_value > 32:
return False
arith_target_2 = Instruction.get_operand_register_family(first_instr.op2)
return gpi_target == arith_target_1 and arith_target_1 != arith_target_2
return False
def is_JOP_COP_dataloader(self):
"""
:return boolean: Returns True if the gadget is a JOP or COP data loader. Defined as a gadget that begins with a
pop opcode to a non-memory location that is also not the target of the GPI. Used to pop a
necessary value off the stack before redirecting to the dispatcher.
"""
first_instr = self.instructions[0]
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(self.instructions[len(self.instructions) - 1].op1)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target != pop_target
return False
def is_JOP_initializer(self):
"""
:return boolean: Returns True if the gadget is a JOP Initializer. Defined as a gadget that begins with a
"pop all" opcode, used to pop necessary values off stack en masse before redirecting to the
dispatcher.
"""
return self.instructions[0].opcode.startswith("popa")
def is_JOP_trampoline(self):
"""
:return boolean: Returns True if the gadget is a JOP trampoline. Defined as a gadget that begins with a
pop opcode to a non-memory location, and that ends in a dereference of that value. Used to
redirect execution to a value stored in memory.
"""
first_instr = self.instructions[0]
gpi_target_op = self.instructions[len(self.instructions) - 1].op1
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(gpi_target_op)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target == pop_target and "[" in gpi_target_op
return False
def is_COP_initializer(self):
"""
:return boolean: Returns True if the gadget is a COP initializer. Defined as a gadget that begins with a
"pop all" opcode, does not use register bx/cx/dx/di as the call target, and does not clobber
bx/cx/dx or the call target in an intermediate instruction
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions)-1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
if first_instr.opcode.startswith("popa") and call_target not in [1, 2, 3, 5]: # BX, CX, DX, DI families
# Build collective list of register families to protect from being clobbered
protected_families = [1, 2, 3, call_target]
protected_registers = []
for family in protected_families:
for register in Instruction.register_families[family]:
protected_registers.append(register)
# Scan intermediate instructions to ensure they do not clobber a protected register
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Check for non-static modification of the register family
if cur_instr.op1 in protected_registers:
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return False
return True
return False
def is_COP_strong_trampoline(self):
"""
:return boolean: Returns True if the gadget is a COP strong trampoline. Defined as a gadget that begins with a
pop opcode, and contains at least one other pop operation. The last pop that is not a "pop all"
must target the call target.
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
# Only consider instructions that start with a pop
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
cnt_pops = 1
last_pop_target = first_instr.op1
# Scan intermediate instructions for pops
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("popa"):
cnt_pops += 1
if cur_instr.opcode == "pop" and "[" not in cur_instr.op1:
cnt_pops += 1
last_pop_target = cur_instr.op1
# Check that at least two pops occurred and the last pop target is the call target
if cnt_pops > 1 and last_pop_target in Instruction.register_families[call_target]:
return True
return False
def is_COP_intrastack_pivot(self):
"""
:return boolean: Returns True if the gadget is a COP Intra-stack pivot gadget. Defined as a gadget that begins
with an arithmetic operation on the stack pointer register. Used to move around in shellcode
during COP exploits. The only restriction on the arithmetic operation is that the second
operand is not a pointer.
"""
first_instr = self.instructions[0]
if first_instr.opcode in ["inc", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
arith_target = Instruction.get_operand_register_family(first_instr.op1)
if arith_target == 7: # RSP, ESP family number
if first_instr.op2 is None or "[" not in first_instr.op2:
return True
return False
def check_contains_leave(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate "leave" instruction.
"""
for i in range(1, len(self.instructions)-1):
if self.instructions[i].opcode == "leave":
self.score += 2.0
return # Only penalize gadget once
def check_sp_target_of_operation(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain
operations on the stack pointer register family.
"""
# Scan instructions to determine if they modify the stack pointer register family
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Increase score by 4 for move, load address, and exchange ops, 3 for shift/rotate ops, 1 for pops, and 2 for others
if Instruction.get_operand_register_family(cur_instr.op1) == 7: # RSP, ESP family number
if "xchg" in cur_instr.opcode or "mov" in cur_instr.opcode or cur_instr.opcode in ["lea"]:
self.score += 4.0
elif cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
elif cur_instr.opcode == "pop":
self.score += 1.0
else:
self.score += 2.0 # Will be a static modification, otherwise it would have been rejected earlier
def check_negative_sp_offsets(self):
"""
:return void: Increases gadget's score if its cumulative stack pointer offset is negative.
"""
sp_offset = 0
# Scan instructions to determine if they modify the stack pointer
for i in range(len(self.instructions)):
cur_instr = self.instructions[i]
if cur_instr.opcode == "push":
sp_offset -= 8
elif cur_instr.opcode == "pop" and cur_instr.op1 not in Instruction.register_families[7]:
sp_offset += 8
elif cur_instr.opcode in ["add", "adc"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset += Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode in ["sub", "sbb"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset -= Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode == "inc" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset += 1
elif cur_instr.opcode == "dec" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset -= 1
elif cur_instr.opcode.startswith("ret") and cur_instr.op1 is not None:
sp_offset += Instruction.get_operand_as_constant(cur_instr.op1)
if sp_offset < 0:
self.score += 2.0
def check_contains_conditional_op(self):
"""
:return void: Increases gadget's score if it contains conditional instructions like jumps, sets, and moves.
"""
# Scan instructions for conditional jumps, moves, sets, and compare-exchanges
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("j") and cur_instr.opcode != "jmp":
self.score += 3.0
elif "cmov" in cur_instr.opcode or "cmpxchg" in cur_instr.opcode:
self.score += 2.0
elif "set" in cur_instr.opcode:
self.score += 1.0
def check_register_ops(self):
"""
:return void: Increases gadget's score if it contains operations on a value-carrying register or a bystander register.
"""
first_instr = self.instructions[0]
# Check if the first instruction creates a value or is an xchg operand (excluded as an edge case)
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
first_family = None
else:
# Check op1 to find the register family to protect
first_family = Instruction.get_operand_register_family(first_instr.op1)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# If the new value is a modification of the value-carrying register
if first_family is not None and first_family == Instruction.get_operand_register_family(cur_instr.op1):
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 1.5
else:
self.score += 1.0 # Will be a static modification, otherwise it would have been rejected earlier
elif "xchg" not in cur_instr.opcode and cur_instr.opcode != "pop":
# The modification is to a "bystander register". static mods +0.5, non-static +1.0
if cur_instr.op2 is not None and Instruction.get_operand_register_family(cur_instr.op2) is not None:
self.score += 1.0
else:
self.score += 0.5
def check_branch_target_of_operation(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain
operations on the indirect branch target register family.
"""
last_instr = self.instructions[len(self.instructions)-1]
target_family = Instruction.get_operand_register_family(last_instr.op1)
# Scan instructions to determine if they modify the target register family
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Increase score by 3 for shift/rotate ops, and 2 for others
if Instruction.get_operand_register_family(cur_instr.op1) == target_family:
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
else: # All other modifications to target register
self.score += 2.0
def check_memory_writes(self):
"""
:return void: Increases gadget's score if the gadget has an instruction that writes to memory.
"""
# Iterate through instructions except GPI
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Have to check both operands for xchg instructions
if "xchg" in cur_instr.opcode and ("[" in cur_instr.op1 or "[" in cur_instr.op2):
self.score += 1.0
elif cur_instr.op1 is not None and "[" in cur_instr.op1:
self.score += 1.0
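# A minimal usage sketch, assuming Instruction parses these hypothetical
# Intel-syntax ROPgadget lines as the methods above expect (offsets and
# instruction strings are illustrative, not from a real binary):
if __name__ == "__main__":
    dispatcher = Gadget("0x1000: add rax, 0x8 ; jmp qword ptr [rax]")
    print(dispatcher.is_JOP_COP_dispatcher())  # True: arithmetic on rax, branch dereferences rax
    loader = Gadget("0x2000: pop rsi ; jmp rax")
    print(loader.is_JOP_COP_dataloader())      # True: pop target (rsi) differs from branch target (rax)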
|
is_JOP_COP_dataloader
|
:return boolean: Returns True if the gadget is a JOP or COP data loader. Defined as a gadget that begins with a
pop opcode to a non-memory location that is also not the target of the GPI. Used to pop a
necessary value off the stack before redirecting to the dispatcher.
|
"""
Gadget class
"""
# Standard Library Imports
# Third Party Imports
# Local Imports
from static_analyzer.Instruction import Instruction
class Gadget(object):
"""
The Gadget class represents a single gadget.
"""
def __init__(self, raw_gadget):
"""
Gadget constructor
:param str raw_gadget: raw line output from ROPgadget
"""
# Parse the raw line
self.offset = raw_gadget[:raw_gadget.find(":")]
self.instruction_string = raw_gadget[raw_gadget.find(":") + 2:]
# Parse instruction objects
self.instructions = []
for instr in self.instruction_string.split(" ; "):
self.instructions.append(Instruction(instr))
# Initialize score
self.score = 0.0
def is_useless_op(self):
"""
:return boolean: Returns True if the first instruction opcode is in the "useless" list, False otherwise
Default behavior is to consider opcodes useful unless otherwise observed.
"""
first_opcode = self.instructions[0].opcode
# Bulk catch for all "jump" opcodes: No reason to include the instruction, just use the suffix directly
if first_opcode.startswith("j"):
return True
# Bulk catch for bounds checked jumps, same reason as above
if first_opcode.startswith("bnd"):
return True
# Bulk catch for all "ret" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("ret"):
return True
# Bulk catch for all "iret" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("iret"):
return True
# Bulk catch for all "call" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("call"):
return True
# Useless opcodes:
# NOP - No reason to include the instruction, just use the suffix directly
# LJMP - Same reason as "jump" opcodes above
useless = ["nop", "fnop", "ljmp"]
return first_opcode in useless
def contains_unusable_op(self):
"""
:return boolean: Returns True if any instruction opcode is unusable, False otherwise.
Unusable instructions are Ring-0 opcodes that trap in user mode and some other exceptional ops.
"""
for instr in self.instructions:
# Bulk catch for all "invalidate" opcodes: Ring-0 instructions
if instr.opcode.startswith("inv"):
return True
# Bulk catch for all "Virtual-Machine" opcodes: Ring-0 instructions
if instr.opcode.startswith("vm") and instr.opcode != "vminsd" and instr.opcode != "vminpd":
return True
# Bulk catch for all "undefined" opcodes
if instr.opcode.startswith("ud"):
return True
# Other Ring-0 opcodes and RSM, LOCK prefix
unusable = ["clts", "hlt", "lgdt", "lidt", "lldt", "lmsw", "ltr", "monitor", "mwait",
"swapgs", "sysexit", "sysreturn", "wbinvd", "wrmsr", "xsetbv", "rsm", "lock"]
if instr.opcode in unusable:
return True
# Check for ring-0 operands (control, debug, and test registers)
if instr.op1 is not None:
if instr.op1.startswith("cr") or instr.op1.startswith("tr") or instr.op1.startswith("db"):
return True
if instr.op2 is not None:
if instr.op2.startswith("cr") or instr.op2.startswith("tr") or instr.op2.startswith("db"):
return True
return False
def is_gpi_only(self):
"""
:return boolean: Returns True if the gadget is a single instruction and starts with 'ret', 'jmp', or 'call',
False otherwise
"""
if len(self.instructions) == 1:
opcode = self.instructions[0].opcode
if opcode.startswith("ret") or opcode.startswith("jmp") or opcode.startswith("call"):
return True
return False
def is_invalid_branch(self):
"""
:return boolean: Returns True if the gadget is 'jmp' or 'call' ending and the call target is a constant offset
or does not target a recognized register family. False otherwise
"""
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("call") or last_instr.opcode.startswith("jmp"):
if Instruction.get_operand_register_family(last_instr.op1) is None:
return True
return False
def has_invalid_ret_offset(self):
"""
:return boolean: Returns True if the gadget is 'ret' ending and contains a constant offset that is odd
(not word aligned) or is greater than 32 bytes, False otherwise
"""
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("ret") and last_instr.op1 is not None:
offset = Instruction.get_operand_as_constant(last_instr.op1)
if (offset % 2 != 0) or (offset > 32):
return True
return False
def clobbers_created_value(self):
"""
:return boolean: Returns True if the gadget completely overwrites the value created in the first instruction,
False otherwise.
"""
first_instr = self.instructions[0]
# Check if the first instruction creates a value or is an xchg operand (excluded as an edge case)
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
return False
# Check op1 to find the register family to protect
first_family = Instruction.get_operand_register_family(first_instr.op1)
# Most likely means first operand is a constant, exclude from analysis
if first_family is None:
return False
# Iterate through intermediate instructions, determine if it overwrites protected value (or part of it)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value() or "xchg" in cur_instr.opcode:
continue
# Check for non-static modification of the register family
if first_family == Instruction.get_operand_register_family(cur_instr.op1):
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
def creates_unusable_value(self):
"""
:return boolean: Returns True if the gadget creates a value in a segment or extension register, in a
RIP-relative location, or at a constant memory location; False otherwise.
"""
# Check if the first instruction creates a value (or may potentially set a flag)
first_instr = self.instructions[0]
if first_instr.opcode in ["cmp", "test", "push"] or first_instr.op1 is None:
return False
# Check if first operand is not a constant and it does not belong to a recognized register family
if not Instruction.is_constant(first_instr.op1) and \
Instruction.get_operand_register_family(first_instr.op1) is None:
return True
return False
def contains_intermediate_GPI(self):
"""
:return boolean: Returns True if the gadget's intermediate instructions contain a GPI (or a generic interrupt),
False otherwise.
"""
for i in range(len(self.instructions)-1):
cur_opcode = self.instructions[i].opcode
cur_target = self.instructions[i].op1
if cur_opcode.startswith("ret") or \
cur_opcode == "syscall" or cur_opcode == "sysenter" or cur_opcode.startswith("int") or \
("jmp" in cur_opcode and not Instruction.is_constant(cur_target)) or \
("call" in cur_opcode and not Instruction.is_constant(cur_target)):
return True
return False
def clobbers_stack_pointer(self):
"""
:return boolean: Returns True if the ROP gadget's instructions assign a non-static value to the stack pointer
register, False otherwise.
"""
# Only check ROP gadgets
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("ret"):
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Check for non-static modification of the stack pointer register family
if Instruction.get_operand_register_family(cur_instr.op1) == 7: # RSP, ESP family number
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "pop"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
def clobbers_indirect_target(self):
"""
:return boolean: Returns True if the JOP/COP gadget's instructions modify the indirect branch register in
certain ways, False otherwise.
"""
# Get the register family of the indirect jump / call
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
family = Instruction.get_operand_register_family(last_instr.op1)
# Check each instruction to see if it clobbers the value
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# First check if the instruction modifies the target
if cur_instr.op1 in Instruction.register_families[family]:
# Does the instruction zero out the target?
if cur_instr.opcode == "xor" and cur_instr.op1 == cur_instr.op2:
return True
# Does the instruction perform a RIP-relative LEA into the target?
if cur_instr.opcode == "lea" and ("rip" in cur_instr.op2 or "eip" in cur_instr.op2):
return True
# Does the instruction load a string or a value of an input port into the target?
if cur_instr.opcode.startswith("lods") or cur_instr.opcode == "in":
return True
# Does the instruction overwrite the target with a static value or segment register value?
if "mov" in cur_instr.opcode and (Instruction.is_constant(cur_instr.op2) or
Instruction.get_operand_register_family(cur_instr.op2) is None):
return True
return False
def has_invalid_int_handler(self):
"""
:return boolean: Returns True if the gadget ends in a software interrupt whose handler is not 0x80 (the
Linux system call handler), False otherwise.
"""
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("int") and last_instr.op1 != "0x80":
return True
return False
def is_rip_relative_indirect_branch(self):
"""
:return boolean: Returns True if the gadget is a JOP/COP gadget relying on a RIP relative indirect branch,
False otherwise.
"""
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
if "rip" in last_instr.op1 or "eip" in last_instr.op1:
return True
return False
def contains_static_call(self):
"""
:return boolean: Returns True if an intermediate instruction is a call to a constant target, False otherwise.
"""
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("call") and Instruction.is_constant(cur_instr.op1):
return True
return False
def is_equal(self, rhs):
"""
:return boolean: Returns True if the gadgets are an exact match, including offset. Used for gadget locality.
"""
return self.offset == rhs.offset and self.instruction_string == rhs.instruction_string
def is_duplicate(self, rhs):
"""
:return boolean: Returns True if the gadgets are a semantic match. Used for non-locality gadget metrics.
Semantic match is defined as the exact same sequence of equivalent instructions.
"""
if len(self.instructions) != len(rhs.instructions):
return False
for i in range(len(self.instructions)):
if not self.instructions[i].is_equivalent(rhs.instructions[i]):
return False
return True
def is_JOP_COP_dispatcher(self):
"""
:return boolean: Returns True if the gadget is a JOP or COP dispatcher. Defined as a gadget that begins with an
arithmetic operation on a register and ends with a branch to a dereference of that register. Used
to iterate through instructions in the payload. The only restriction on the arithmetic operation
is that it doesn't use the same register as both operands.
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
# Only consider gadgets that end in dereference of a register and start with opcodes of interest
if "[" in last_instr.op1 and \
first_instr.opcode in ["inc", "dec", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(last_instr.op1)
arith_target_1 = Instruction.get_operand_register_family(first_instr.op1)
# Secondary check: if the second op is a constant ensure it is in range [1, 32]
if Instruction.is_constant(first_instr.op2):
additive_value = Instruction.get_operand_as_constant(first_instr.op2)
if additive_value < 1 or additive_value > 32:
return False
arith_target_2 = Instruction.get_operand_register_family(first_instr.op2)
return gpi_target == arith_target_1 and arith_target_1 != arith_target_2
return False
# MASKED: is_JOP_COP_dataloader function (lines 332-345)
def is_JOP_initializer(self):
"""
:return boolean: Returns True if the gadget is a JOP Initializer. Defined as a gadget that begins with a
"pop all" opcode, used to pop necessary values off stack en masse before redirecting to the
dispatcher.
"""
return self.instructions[0].opcode.startswith("popa")
def is_JOP_trampoline(self):
"""
:return boolean: Returns True if the gadget is a JOP trampoline. Defined as a gadget that begins with a
pop opcode to a non-memory location, and that ends in a dereference of that value. Used to
redirect execution to a value stored in memory.
"""
first_instr = self.instructions[0]
gpi_target_op = self.instructions[len(self.instructions) - 1].op1
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(gpi_target_op)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target == pop_target and "[" in gpi_target_op
return False
def is_COP_initializer(self):
"""
:return boolean: Returns True if the gadget is a COP initializer. Defined as a gadget that begins with a
"pop all" opcode, does not use register bx/cx/dx/di as the call target, and does not clobber
bx/cx/dx or the call target in an intermediate instruction
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions)-1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
if first_instr.opcode.startswith("popa") and call_target not in [1, 2, 3, 5]: # BX, CX, DX, DI families
# Build collective list of register families to protect from being clobbered
protected_families = [1, 2, 3, call_target]
protected_registers = []
for family in protected_families:
for register in Instruction.register_families[family]:
protected_registers.append(register)
# Scan intermediate instructions to ensure they do not clobber a protected register
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Check for non-static modification of the register family
if cur_instr.op1 in protected_registers:
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return False
return True
return False
def is_COP_strong_trampoline(self):
"""
:return boolean: Returns True if the gadget is a COP strong trampoline. Defined as a gadget that begins with a
pop opcode, and contains at least one other pop operation. The last pop that is not a "pop all"
must target the call target.
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
# Only consider instructions that start with a pop
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
cnt_pops = 1
last_pop_target = first_instr.op1
# Scan intermediate instructions for pops
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("popa"):
cnt_pops += 1
if cur_instr.opcode == "pop" and "[" not in cur_instr.op1:
cnt_pops += 1
last_pop_target = cur_instr.op1
# Check that at least two pops occurred and the last pop target is the call target
if cnt_pops > 1 and last_pop_target in Instruction.register_families[call_target]:
return True
return False
def is_COP_intrastack_pivot(self):
"""
:return boolean: Returns True if the gadget is a COP Intra-stack pivot gadget. Defined as a gadget that begins
with an arithmetic operation on the stack pointer register. Used to move around in shellcode
during COP exploits. The only restriction on the arithmetic operation is that the second
operand is not a pointer.
"""
first_instr = self.instructions[0]
if first_instr.opcode in ["inc", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
arith_target = Instruction.get_operand_register_family(first_instr.op1)
if arith_target == 7: # RSP, ESP family number
if first_instr.op2 is None or "[" not in first_instr.op2:
return True
return False
def check_contains_leave(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate "leave" instruction.
"""
for i in range(1, len(self.instructions)-1):
if self.instructions[i].opcode == "leave":
self.score += 2.0
return # Only penalize gadget once
def check_sp_target_of_operation(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain
operations on the stack pointer register family.
"""
# Scan instructions to determine if they modify the stack pointer register family
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Increase score by 4 for move, load address, and exchange ops, 3 for shift/rotate ops, 1 for pops, and 2 for others
if Instruction.get_operand_register_family(cur_instr.op1) == 7: # RSP, ESP family number
if "xchg" in cur_instr.opcode or "mov" in cur_instr.opcode or cur_instr.opcode in ["lea"]:
self.score += 4.0
elif cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
elif cur_instr.opcode == "pop":
self.score += 1.0
else:
self.score += 2.0 # Will be a static modification, otherwise it would have been rejected earlier
def check_negative_sp_offsets(self):
"""
:return void: Increases gadget's score if its cumulative stack pointer offset is negative.
"""
sp_offset = 0
# Scan instructions to determine if they modify the stack pointer
for i in range(len(self.instructions)):
cur_instr = self.instructions[i]
if cur_instr.opcode == "push":
sp_offset -= 8
elif cur_instr.opcode == "pop" and cur_instr.op1 not in Instruction.register_families[7]:
sp_offset += 8
elif cur_instr.opcode in ["add", "adc"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset += Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode in ["sub", "sbb"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset -= Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode == "inc" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset += 1
elif cur_instr.opcode == "dec" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset -= 1
elif cur_instr.opcode.startswith("ret") and cur_instr.op1 is not None:
sp_offset += Instruction.get_operand_as_constant(cur_instr.op1)
if sp_offset < 0:
self.score += 2.0
def check_contains_conditional_op(self):
"""
:return void: Increases gadget's score if it contains conditional instructions like jumps, sets, and moves.
"""
# Scan instructions for conditional jumps, moves, sets, and compare-exchanges
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("j") and cur_instr.opcode != "jmp":
self.score += 3.0
elif "cmov" in cur_instr.opcode or "cmpxchg" in cur_instr.opcode:
self.score += 2.0
elif "set" in cur_instr.opcode:
self.score += 1.0
def check_register_ops(self):
"""
:return void: Increases gadget's score if it contains operations on a value-carrying register or a bystander register.
"""
first_instr = self.instructions[0]
# Check if the first instruction creates a value or is an xchg operand (excluded as an edge case)
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
first_family = None
else:
# Check op1 to find the register family to protect
first_family = Instruction.get_operand_register_family(first_instr.op1)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# If the new value is a modification of the value-carrying register
if first_family is not None and first_family == Instruction.get_operand_register_family(cur_instr.op1):
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 1.5
else:
self.score += 1.0 # Will be a static modification, otherwise it would have been rejected earlier
elif "xchg" not in cur_instr.opcode and cur_instr.opcode != "pop":
# The modification is to a "bystander register". static mods +0.5, non-static +1.0
if cur_instr.op2 is not None and Instruction.get_operand_register_family(cur_instr.op2) is not None:
self.score += 1.0
else:
self.score += 0.5
def check_branch_target_of_operation(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain
operations on the indirect branch target register family.
"""
last_instr = self.instructions[len(self.instructions)-1]
target_family = Instruction.get_operand_register_family(last_instr.op1)
# Scan instructions to determine if they modify the target register family
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Increase score by 3 for shift/rotate ops, and 2 for others
if Instruction.get_operand_register_family(cur_instr.op1) == target_family:
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
else: # All other modifications to target register
self.score += 2.0
def check_memory_writes(self):
"""
:return void: Increases gadget's score if the gadget has an instruction that writes to memory.
"""
# Iterate through instructions except GPI
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Have to check both operands for xchg instructions
if "xchg" in cur_instr.opcode and ("[" in cur_instr.op1 or "[" in cur_instr.op2):
self.score += 1.0
elif cur_instr.op1 is not None and "[" in cur_instr.op1:
self.score += 1.0
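# A minimal scoring sketch, assuming Instruction parses this hypothetical
# line as written: the check_* methods accumulate a penalty score rather
# than returning a verdict.
if __name__ == "__main__":
    g = Gadget("0x3000: push rax ; ret")
    g.check_negative_sp_offsets()  # push moves the stack pointer down by 8, so the net offset is negative
    print(g.score)                 # 2.0 under these assumptions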
|
def is_JOP_COP_dataloader(self):
"""
:return boolean: Returns True if the gadget is a JOP or COP data loader. Defined as a gadget that begins with a
pop opcode to a non-memory location that is also not the target of the GPI. Used to pop a
necessary value off the stack before redirecting to the dispatcher.
"""
first_instr = self.instructions[0]
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(self.instructions[len(self.instructions) - 1].op1)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target != pop_target
return False
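# A minimal check of the rule above, assuming the Gadget class defined earlier
# and that Instruction parses these hypothetical operands as written:
g_load = Gadget("0x10: pop rdi ; jmp rax")   # pop target (rdi) != branch target (rax)
g_self = Gadget("0x20: pop rax ; jmp rax")   # pop target == branch target
assert g_load.is_JOP_COP_dataloader() and not g_self.is_JOP_COP_dataloader()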
| 332
| 345
|
"""
Gadget class
"""
# Standard Library Imports
# Third Party Imports
# Local Imports
from static_analyzer.Instruction import Instruction
class Gadget(object):
"""
The Gadget class represents a single gadget.
"""
def __init__(self, raw_gadget):
"""
Gadget constructor
:param str raw_gadget: raw line output from ROPgadget
"""
# Parse the raw line
self.offset = raw_gadget[:raw_gadget.find(":")]
self.instruction_string = raw_gadget[raw_gadget.find(":") + 2:]
# Parse instruction objects
self.instructions = []
for instr in self.instruction_string.split(" ; "):
self.instructions.append(Instruction(instr))
# Initialize score
self.score = 0.0
def is_useless_op(self):
"""
:return boolean: Returns True if the first instruction opcode is in the "useless" list, False otherwise
Default behavior is to consider opcodes useful unless otherwise observed.
"""
first_opcode = self.instructions[0].opcode
# Bulk catch for all "jump" opcodes: No reason to include the instruction, just use the suffix directly
if first_opcode.startswith("j"):
return True
# Bulk catch for bounds checked jumps, same reason as above
if first_opcode.startswith("bnd"):
return True
# Bulk catch for all "ret" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("ret"):
return True
# Bulk catch for all "iret" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("iret"):
return True
# Bulk catch for all "call" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("call"):
return True
# Useless opcodes:
# NOP - No reason to include the instruction, just use the suffix directly
# LJMP - Same reason as "jump" opcodes above
useless = ["nop", "fnop", "ljmp"]
return first_opcode in useless
def contains_unusable_op(self):
"""
:return boolean: Returns True if any instruction opcode is unusable, False otherwise.
Unusable instructions are Ring-0 opcodes that trap in user mode and some other exceptional ops.
"""
for instr in self.instructions:
# Bulk catch for all "invalidate" opcodes: Ring-0 instructions
if instr.opcode.startswith("inv"):
return True
# Bulk catch for all "Virtual-Machine" opcodes: Ring-0 instructions
if instr.opcode.startswith("vm") and instr.opcode != "vminsd" and instr.opcode != "vminpd":
return True
# Bulk catch for all "undefined" opcodes
if instr.opcode.startswith("ud"):
return True
# Other Ring-0 opcodes and RSM, LOCK prefix
unusable = ["clts", "hlt", "lgdt", "lidt", "lldt", "lmsw", "ltr", "monitor", "mwait",
"swapgs", "sysexit", "sysreturn", "wbinvd", "wrmsr", "xsetbv", "rsm", "lock"]
if instr.opcode in unusable:
return True
# Check for ring-0 operands (control, debug, and test registers)
if instr.op1 is not None:
if instr.op1.startswith("cr") or instr.op1.startswith("tr") or instr.op1.startswith("db"):
return True
if instr.op2 is not None:
if instr.op2.startswith("cr") or instr.op2.startswith("tr") or instr.op2.startswith("db"):
return True
return False
def is_gpi_only(self):
"""
:return boolean: Returns True if the gadget is a single instruction and starts with 'ret', 'jmp', or 'call',
False otherwise
"""
if len(self.instructions) == 1:
opcode = self.instructions[0].opcode
if opcode.startswith("ret") or opcode.startswith("jmp") or opcode.startswith("call"):
return True
return False
def is_invalid_branch(self):
"""
:return boolean: Returns True if the gadget is 'jmp' or 'call' ending and the call target is a constant offset
or does not target a recognized register family. False otherwise
"""
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("call") or last_instr.opcode.startswith("jmp"):
if Instruction.get_operand_register_family(last_instr.op1) is None:
return True
return False
def has_invalid_ret_offset(self):
"""
:return boolean: Returns True if the gadget is 'ret' ending and contains a constant offset that is odd
(not word aligned) or is greater than 32 bytes, False otherwise
"""
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("ret") and last_instr.op1 is not None:
offset = Instruction.get_operand_as_constant(last_instr.op1)
if (offset % 2 != 0) or (offset > 32):
return True
return False
def clobbers_created_value(self):
"""
:return boolean: Returns True if the gadget completely overwrites the value created in the first instruction,
False otherwise.
"""
first_instr = self.instructions[0]
# Check if the first instruction creates a value or is an xchg operand (excluded as an edge case)
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
return False
# Check op1 to find the register family to protect
first_family = Instruction.get_operand_register_family(first_instr.op1)
# Most likely means first operand is a constant, exclude from analysis
if first_family is None:
return False
# Iterate through intermediate instructions, determine if it overwrites protected value (or part of it)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value() or "xchg" in cur_instr.opcode:
continue
# Check for non-static modification of the register family
if first_family == Instruction.get_operand_register_family(cur_instr.op1):
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
def creates_unusable_value(self):
"""
:return boolean: Returns True if the gadget creates a value in a segment or extension register, in a
RIP-relative location, or at a constant memory location; False otherwise.
"""
# Check if the first instruction creates a value (or may potentially set a flag)
first_instr = self.instructions[0]
if first_instr.opcode in ["cmp", "test", "push"] or first_instr.op1 is None:
return False
# Check if first operand is not a constant and it does not belong to a recognized register family
if not Instruction.is_constant(first_instr.op1) and \
Instruction.get_operand_register_family(first_instr.op1) is None:
return True
return False
def contains_intermediate_GPI(self):
"""
:return boolean: Returns True if the gadget's intermediate instructions contain a GPI (or a generic interrupt),
False otherwise.
"""
for i in range(len(self.instructions)-1):
cur_opcode = self.instructions[i].opcode
cur_target = self.instructions[i].op1
if cur_opcode.startswith("ret") or \
cur_opcode == "syscall" or cur_opcode == "sysenter" or cur_opcode.startswith("int") or \
("jmp" in cur_opcode and not Instruction.is_constant(cur_target)) or \
("call" in cur_opcode and not Instruction.is_constant(cur_target)):
return True
return False
def clobbers_stack_pointer(self):
"""
:return boolean: Returns True if the ROP gadget's instructions assign a non-static value to the stack pointer
register, False otherwise.
"""
# Only check ROP gadgets
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("ret"):
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Check for non-static modification of the stack pointer register family
if Instruction.get_operand_register_family(cur_instr.op1) == 7: # RSP, ESP family number
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "pop"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
def clobbers_indirect_target(self):
"""
:return boolean: Returns True if the JOP/COP gadget's instructions modify the indirect branch register in
certain ways, False otherwise.
"""
# Get the register family of the indirect jump / call
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
family = Instruction.get_operand_register_family(last_instr.op1)
# Check each instruction to see if it clobbers the value
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# First check if the instruction modifies the target
if cur_instr.op1 in Instruction.register_families[family]:
# Does the instruction zero out the target?
if cur_instr.opcode == "xor" and cur_instr.op1 == cur_instr.op2:
return True
# Does the instruction perform a RIP-relative LEA into the target?
if cur_instr.opcode == "lea" and ("rip" in cur_instr.op2 or "eip" in cur_instr.op2):
return True
# Does the instruction load a string or a value of an input port into the target?
if cur_instr.opcode.startswith("lods") or cur_instr.opcode == "in":
return True
# Does the instruction overwrite the target with a static value or segment register value?
if "mov" in cur_instr.opcode and (Instruction.is_constant(cur_instr.op2) or
Instruction.get_operand_register_family(cur_instr.op2) is None):
return True
return False
def has_invalid_int_handler(self):
"""
:return boolean: Returns True if the gadget ends in a software interrupt whose handler is not 0x80 (the
Linux system call handler), False otherwise.
"""
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("int") and last_instr.op1 != "0x80":
return True
return False
def is_rip_relative_indirect_branch(self):
"""
:return boolean: Returns True if the gadget is a JOP/COP gadget relying on a RIP relative indirect branch,
False otherwise.
"""
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
if "rip" in last_instr.op1 or "eip" in last_instr.op1:
return True
return False
def contains_static_call(self):
"""
:return boolean: Returns True if an intermediate instruction is a call to a constant target, False otherwise.
"""
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("call") and Instruction.is_constant(cur_instr.op1):
return True
return False
def is_equal(self, rhs):
"""
:return boolean: Returns True if the gadgets are an exact match, including offset. Used for gadget locality.
"""
return self.offset == rhs.offset and self.instruction_string == rhs.instruction_string
def is_duplicate(self, rhs):
"""
:return boolean: Returns True if the gadgets are a semantic match. Used for non-locality gadget metrics.
Semantic match is defined as the exact same sequence of equivalent instructions.
"""
if len(self.instructions) != len(rhs.instructions):
return False
for i in range(len(self.instructions)):
if not self.instructions[i].is_equivalent(rhs.instructions[i]):
return False
return True
def is_JOP_COP_dispatcher(self):
"""
:return boolean: Returns True if the gadget is a JOP or COP dispatcher. Defined as a gadget that begins with an
arithmetic operation on a register and ends with a branch to a dereference of that register. Used
to iterate through instructions in the payload. The only restriction on the arithmetic operation
is that it doesn't use the same register as both operands.
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
# Only consider gadgets that end in dereference of a register and start with opcodes of interest
if "[" in last_instr.op1 and \
first_instr.opcode in ["inc", "dec", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(last_instr.op1)
arith_target_1 = Instruction.get_operand_register_family(first_instr.op1)
# Secondary check: if the second op is a constant ensure it is in range [1, 32]
if Instruction.is_constant(first_instr.op2):
additive_value = Instruction.get_operand_as_constant(first_instr.op2)
if additive_value < 1 or additive_value > 32:
return False
arith_target_2 = Instruction.get_operand_register_family(first_instr.op2)
return gpi_target == arith_target_1 and arith_target_1 != arith_target_2
return False
def is_JOP_COP_dataloader(self):
"""
:return boolean: Returns True if the gadget is a JOP or COP data loader. Defined as a gadget that begins with a
pop opcode to a non-memory location that is also not the target of the GPI. Used to pop a
necessary value off the stack before redirecting to the dispatcher.
"""
first_instr = self.instructions[0]
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(self.instructions[len(self.instructions) - 1].op1)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target != pop_target
return False
def is_JOP_initializer(self):
"""
:return boolean: Returns True if the gadget is a JOP Initializer. Defined as a gadget that begins with a
"pop all" opcode, used to pop necessary values off stack en masse before redirecting to the
dispatcher.
"""
return self.instructions[0].opcode.startswith("popa")
def is_JOP_trampoline(self):
"""
:return boolean: Returns True if the gadget is a JOP trampoline. Defined as a gadget that begins with a
pop opcode to a non-memory location, and that ends in a dereference of that value. Used to
redirect execution to a value stored in memory.
"""
first_instr = self.instructions[0]
gpi_target_op = self.instructions[len(self.instructions) - 1].op1
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(gpi_target_op)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target == pop_target and "[" in gpi_target_op
return False
def is_COP_initializer(self):
"""
:return boolean: Returns True if the gadget is a COP initializer. Defined as a gadget that begins with a
"pop all" opcode, does not use register bx/cx/dx/di as the call target, and does not clobber
bx/cx/dx or the call target in an intermediate instruction
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions)-1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
if first_instr.opcode.startswith("popa") and call_target not in [1, 2, 3, 5]: # BX, CX, DX, DI families
# Build collective list of register families to protect from being clobbered
protected_families = [1, 2, 3, call_target]
protected_registers = []
for family in protected_families:
for register in Instruction.register_families[family]:
protected_registers.append(register)
# Scan intermediate instructions to ensure they do not clobber a protected register
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Check for non-static modification of the register family
if cur_instr.op1 in protected_registers:
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return False
return True
return False
def is_COP_strong_trampoline(self):
"""
:return boolean: Returns True if the gadget is a COP strong trampoline. Defined as a gadget that begins with a
pop opcode, and contains at least one other pop operation. The last pop that is not a "pop all"
must target the call target.
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
# Only consider instructions that start with a pop
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
cnt_pops = 1
last_pop_target = first_instr.op1
# Scan intermediate instructions for pops
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("popa"):
cnt_pops += 1
if cur_instr.opcode == "pop" and "[" not in cur_instr.op1:
cnt_pops += 1
last_pop_target = cur_instr.op1
# Check that at least two pops occurred and the last pop target is the call target
if cnt_pops > 1 and last_pop_target in Instruction.register_families[call_target]:
return True
return False
def is_COP_intrastack_pivot(self):
"""
:return boolean: Returns True if the gadget is a COP Intra-stack pivot gadget. Defined as a gadget that begins
with an arithmetic operation on the stack pointer register. Used to move around in shellcode
during COP exploits. The only restriction on the arithmetic operation is that the second
operand is not a pointer.
"""
first_instr = self.instructions[0]
if first_instr.opcode in ["inc", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
arith_target = Instruction.get_operand_register_family(first_instr.op1)
if arith_target == 7: # RSP, ESP family number
if first_instr.op2 is None or "[" not in first_instr.op2:
return True
return False
def check_contains_leave(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate "leave" instruction.
"""
for i in range(1, len(self.instructions)-1):
if self.instructions[i].opcode == "leave":
self.score += 2.0
return # Only penalize gadget once
def check_sp_target_of_operation(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain
operations on the stack pointer register family.
"""
# Scan instructions to determine if they modify the stack pointer register family
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Increase score by 4 for move, load address, and exchange ops, 3 for shift/rotate ops, 1 for pops, and 2 for others
if Instruction.get_operand_register_family(cur_instr.op1) == 7: # RSP, ESP family number
if "xchg" in cur_instr.opcode or "mov" in cur_instr.opcode or cur_instr.opcode in ["lea"]:
self.score += 4.0
elif cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
elif cur_instr.opcode == "pop":
self.score += 1.0
else:
self.score += 2.0 # Will be a static modification, otherwise it would have been rejected earlier
def check_negative_sp_offsets(self):
"""
:return void: Increases gadget's score if its cumulative stack pointer offset is negative.
"""
sp_offset = 0
# Scan instructions to determine if they modify the stack pointer
for i in range(len(self.instructions)):
cur_instr = self.instructions[i]
if cur_instr.opcode == "push":
sp_offset -= 8
elif cur_instr.opcode == "pop" and cur_instr.op1 not in Instruction.register_families[7]:
sp_offset += 8
elif cur_instr.opcode in ["add", "adc"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset += Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode in ["sub", "sbb"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset -= Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode == "inc" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset += 1
elif cur_instr.opcode == "dec" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset -= 1
elif cur_instr.opcode.startswith("ret") and cur_instr.op1 is not None:
sp_offset += Instruction.get_operand_as_constant(cur_instr.op1)
if sp_offset < 0:
self.score += 2.0
def check_contains_conditional_op(self):
"""
:return void: Increases gadget's score if it contains conditional instructions like jumps, sets, and moves.
"""
# Scan instructions for conditional jumps, moves, sets, and compare-exchanges
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("j") and cur_instr.opcode != "jmp":
self.score += 3.0
elif "cmov" in cur_instr.opcode or "cmpxchg" in cur_instr.opcode:
self.score += 2.0
elif "set" in cur_instr.opcode:
self.score += 1.0
def check_register_ops(self):
"""
:return void: Increases gadget's score if it contains operations on a value-carrying register or a bystander register.
"""
first_instr = self.instructions[0]
# Check if the first instruction creates a value or is an xchg operand (excluded as an edge case)
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
first_family = None
else:
# Check op1 to find the register family to protect
first_family = Instruction.get_operand_register_family(first_instr.op1)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# If the new value is a modification of the value-carrying register
if first_family is not None and first_family == Instruction.get_operand_register_family(cur_instr.op1):
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 1.5
else:
self.score += 1.0 # Will be a static modification, otherwise it would have been rejected earlier
elif "xchg" not in cur_instr.opcode and cur_instr.opcode != "pop":
# The modification is to a "bystander register". static mods +0.5, non-static +1.0
if cur_instr.op2 is not None and Instruction.get_operand_register_family(cur_instr.op2) is not None:
self.score += 1.0
else:
self.score += 0.5
def check_branch_target_of_operation(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain
operations on the indirect branch target register family.
"""
last_instr = self.instructions[len(self.instructions)-1]
target_family = Instruction.get_operand_register_family(last_instr.op1)
# Scan instructions to determine if they modify the target register family
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Increase score by 3 for shift/rotate ops, and 2 for others
if Instruction.get_operand_register_family(cur_instr.op1) == target_family:
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
else: # All other modifications to target register
self.score += 2.0
def check_memory_writes(self):
"""
:return void: Increases gadget's score if the gadget has an instruction that writes to memory.
"""
# Iterate through instructions except GPI
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Have to check both operands for xchg instructions
if "xchg" in cur_instr.opcode and ("[" in cur_instr.op1 or "[" in cur_instr.op2):
self.score += 1.0
elif cur_instr.op1 is not None and "[" in cur_instr.op1:
self.score += 1.0
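# A minimal trampoline sketch, assuming Instruction handles these hypothetical
# Intel-syntax strings: the popped register is later dereferenced by the GPI.
if __name__ == "__main__":
    tramp = Gadget("0x4000: pop rax ; jmp qword ptr [rax]")
    print(tramp.is_JOP_trampoline())  # True: the branch dereferences the popped register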
|
is_JOP_trampoline
|
:return boolean: Returns True if the gadget is a JOP trampoline. Defined as a gadget that begins with a
pop opcode to a non-memory location, and that ends in a dereference of that value. Used to
redirect execution to a value stored in memory.
|
"""
Gadget class
"""
# Standard Library Imports
# Third Party Imports
# Local Imports
from static_analyzer.Instruction import Instruction
class Gadget(object):
"""
The Gadget class represents a single gadget.
"""
def __init__(self, raw_gadget):
"""
Gadget constructor
:param str raw_gadget: raw line output from ROPgadget
"""
# Parse the raw line
self.offset = raw_gadget[:raw_gadget.find(":")]
self.instruction_string = raw_gadget[raw_gadget.find(":") + 2:]
# Parse instruction objects
self.instructions = []
for instr in self.instruction_string.split(" ; "):
self.instructions.append(Instruction(instr))
# Initialize score
self.score = 0.0
def is_useless_op(self):
"""
:return boolean: Returns True if the first instruction opcode is in the "useless" list, False otherwise
Default behavior is to consider opcodes useful unless otherwise observed.
"""
first_opcode = self.instructions[0].opcode
# Bulk catch for all "jump" opcodes: No reason to include the instruction, just use the suffix directly
if first_opcode.startswith("j"):
return True
# Bulk catch for bounds checked jumps, same reason as above
if first_opcode.startswith("bnd"):
return True
# Bulk catch for all "ret" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("ret"):
return True
# Bulk catch for all "iret" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("iret"):
return True
# Bulk catch for all "call" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("call"):
return True
# Useless opcodes:
# NOP - No reason to include the instruction, just use the suffix directly
# LJMP - Same reason as "jump" opcodes above
useless = ["nop", "fnop", "ljmp"]
return first_opcode in useless
def contains_unusable_op(self):
"""
:return boolean: Returns True if any instruction opcode is unusable, False otherwise.
Unusable instructions are Ring-0 opcodes that trap in user mode and some other exceptional ops.
"""
for instr in self.instructions:
# Bulk catch for all "invalidate" opcodes: Ring-0 instructions
if instr.opcode.startswith("inv"):
return True
# Bulk catch for all "Virtual-Machine" opcodes: Ring-0 instructions
if instr.opcode.startswith("vm") and instr.opcode != "vminsd" and instr.opcode != "vminpd":
return True
# Bulk catch for all "undefined" opcodes
if instr.opcode.startswith("ud"):
return True
# Other Ring-0 opcodes and RSM, LOCK prefix
unusable = ["clts", "hlt", "lgdt", "lidt", "lldt", "lmsw", "ltr", "monitor", "mwait",
"swapgs", "sysexit", "sysreturn", "wbinvd", "wrmsr", "xsetbv", "rsm", "lock"]
if instr.opcode in unusable:
return True
# Check for ring-0 operands (control, debug, and test registers)
if instr.op1 is not None:
if instr.op1.startswith("cr") or instr.op1.startswith("tr") or instr.op1.startswith("db"):
return True
if instr.op2 is not None:
if instr.op2.startswith("cr") or instr.op2.startswith("tr") or instr.op2.startswith("db"):
return True
return False
def is_gpi_only(self):
"""
:return boolean: Returns True if the gadget is a single instruction and starts with 'ret', 'jmp', or 'call',
False otherwise
"""
if len(self.instructions) == 1:
opcode = self.instructions[0].opcode
if opcode.startswith("ret") or opcode.startswith("jmp") or opcode.startswith("call"):
return True
return False
def is_invalid_branch(self):
"""
:return boolean: Returns True if the gadget is 'jmp' or 'call' ending and the call target is a constant offset
or does not target a recognized register family. False otherwise
"""
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("call") or last_instr.opcode.startswith("jmp"):
if Instruction.get_operand_register_family(last_instr.op1) is None:
return True
return False
def has_invalid_ret_offset(self):
"""
:return boolean: Returns True if the gadget is 'ret' ending and contains a constant offset that is odd
or greater than 32 bytes, False otherwise
"""
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("ret") and last_instr.op1 is not None:
offset = Instruction.get_operand_as_constant(last_instr.op1)
if (offset % 2 != 0) or (offset > 32):
return True
return False
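# Illustrative examples (hypothetical gadgets):
#   "pop rax ; ret 0x10" -> False (16 is even and within the 32-byte limit)
#   "pop rax ; ret 0xb"  -> True  (11 is odd)
#   "pop rax ; ret 0x40" -> True  (64 exceeds the 32-byte limit)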
def clobbers_created_value(self):
"""
:return boolean: Returns True if the gadget completely overwrites the value created in the first instruction,
False otherwise.
"""
first_instr = self.instructions[0]
# Check if the first instruction creates a value or is an xchg operand (excluded as an edge case)
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
return False
# Check op1 to find the register family to protect
first_family = Instruction.get_operand_register_family(first_instr.op1)
# Most likely means first operand is a constant, exclude from analysis
if first_family is None:
return False
# Iterate through intermediate instructions, determine if it overwrites protected value (or part of it)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value() or "xchg" in cur_instr.opcode:
continue
# Check for non-static modification of the register family
if first_family == Instruction.get_operand_register_family(cur_instr.op1):
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
def creates_unusable_value(self):
"""
:return boolean: Returns True if the gadget creates a value in a segment or extension register, in a
RIP-relative location, or at a constant memory location; False otherwise.
"""
# Check if the first instruction creates a value (or may potentially set a flag)
first_instr = self.instructions[0]
if first_instr.opcode in ["cmp", "test", "push"] or first_instr.op1 is None:
return False
# Check if first operand is not a constant and it does not belong to a recognized register family
if not Instruction.is_constant(first_instr.op1) and \
Instruction.get_operand_register_family(first_instr.op1) is None:
return True
return False
def contains_intermediate_GPI(self):
"""
:return boolean: Returns True if the gadget's intermediate instructions contain a GPI (or a generic interrupt),
False otherwise.
"""
for i in range(len(self.instructions)-1):
cur_opcode = self.instructions[i].opcode
cur_target = self.instructions[i].op1
if cur_opcode.startswith("ret") or \
cur_opcode == "syscall" or cur_opcode == "sysenter" or cur_opcode.startswith("int") or \
("jmp" in cur_opcode and not Instruction.is_constant(cur_target)) or \
("call" in cur_opcode and not Instruction.is_constant(cur_target)):
return True
return False
def clobbers_stack_pointer(self):
"""
:return boolean: Returns True if the ROP gadget's instructions assign a non-static value to the stack pointer
register, False otherwise.
"""
# Only check ROP gadgets
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("ret"):
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Check for non-static modification of the stack pointer register family
if Instruction.get_operand_register_family(cur_instr.op1) == 7: # RSP, ESP family number
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "pop"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
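# Illustrative examples (hypothetical gadgets):
#   "mov esp, eax ; ret" -> True  (non-static assignment to the stack pointer family)
#   "add rsp, 8 ; ret"   -> False (constant adjustments are tolerated)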
def clobbers_indirect_target(self):
"""
:return boolean: Returns True if the JOP/COP gadget's instructions modify the indirect branch register in
certain ways, False otherwise.
"""
# Get the register family of the indirect jump / call
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
family = Instruction.get_operand_register_family(last_instr.op1)
# Check each instruction to see if it clobbers the value
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# First check if the instruction modifies the target
if cur_instr.op1 in Instruction.register_families[family]:
# Does the instruction zero out the target?
if cur_instr.opcode == "xor" and cur_instr.op1 == cur_instr.op2:
return True
# Does the instruction perform a RIP-relative LEA into the target?
if cur_instr.opcode == "lea" and ("rip" in cur_instr.op2 or "eip" in cur_instr.op2):
return True
# Does the instruction load a string or a value of an input port into the target?
if cur_instr.opcode.startswith("lods") or cur_instr.opcode == "in":
return True
# Does the instruction overwrite the target with a static value or segment register value?
if "mov" in cur_instr.opcode and (Instruction.is_constant(cur_instr.op2) or
Instruction.get_operand_register_family(cur_instr.op2) is None):
return True
return False
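# Illustrative examples (hypothetical gadgets):
#   "xor rax, rax ; jmp rax"   -> True  (branch target zeroed before the jump)
#   "mov rax, 0x10 ; call rax" -> True  (branch target overwritten with a constant)
#   "add rax, rbx ; jmp rax"   -> False (modified, but not by one of the clobbering patterns above)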
def has_invalid_int_handler(self):
"""
:return boolean: Returns True if the gadget ends in a software interrupt whose handler is not 0x80 (the
Linux system call handler), False otherwise.
"""
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("int") and last_instr.op1 != "0x80":
return True
return False
def is_rip_relative_indirect_branch(self):
"""
:return boolean: Returns True if the gadget is a JOP/COP gadget relying on a RIP relative indirect branch,
False otherwise.
"""
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
if "rip" in last_instr.op1 or "eip" in last_instr.op1:
return True
return False
def contains_static_call(self):
"""
:return boolean: Returns True if any intermediate instruction is a call to a constant target, False otherwise.
"""
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("call") and Instruction.is_constant(cur_instr.op1):
return True
return False
def is_equal(self, rhs):
"""
:return boolean: Returns True if the gadgets are an exact match, including offset. Used for gadget locality.
"""
return self.offset == rhs.offset and self.instruction_string == rhs.instruction_string
def is_duplicate(self, rhs):
"""
:return boolean: Returns True if the gadgets are a semantic match. Used for non-locality gadget metrics.
Semantic match is defined as the exact same sequence of equivalent instructions.
"""
if len(self.instructions) != len(rhs.instructions):
return False
for i in range(len(self.instructions)):
if not self.instructions[i].is_equivalent(rhs.instructions[i]):
return False
return True
def is_JOP_COP_dispatcher(self):
"""
:return boolean: Returns True if the gadget is a JOP or COP dispatcher. Defined as a gadget that begins with an
arithmetic operation on a register and ends with a branch to a dereference of that register. Used
to iterate through instructions in the payload. The only restriction on the arithmetic operation is
that it doesn't use the same register as both operands.
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
# Only consider gadgets that end in dereference of a register and start with opcodes of interest
if "[" in last_instr.op1 and \
first_instr.opcode in ["inc", "dec", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(last_instr.op1)
arith_target_1 = Instruction.get_operand_register_family(first_instr.op1)
# Secondary check: if the second op is a constant ensure it is in range [1, 32]
if Instruction.is_constant(first_instr.op2):
additive_value = Instruction.get_operand_as_constant(first_instr.op2)
if additive_value < 1 or additive_value > 32:
return False
arith_target_2 = Instruction.get_operand_register_family(first_instr.op2)
return gpi_target == arith_target_1 and arith_target_1 != arith_target_2
return False
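# Illustrative examples (hypothetical gadgets):
#   "add rbx, 8 ; jmp qword ptr [rbx]"   -> True  (arithmetic result feeds the dereferenced branch)
#   "add rbx, rbx ; jmp qword ptr [rbx]" -> False (same register family used as both operands)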
def is_JOP_COP_dataloader(self):
"""
:return boolean: Returns True if the gadget is a JOP or COP data loader. Defined as a gadget that begins with a
pop opcode to a non-memory location that is also not the target of the GPI. Used to pop a
necessary value off the stack before redirecting to the dispatcher.
"""
first_instr = self.instructions[0]
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(self.instructions[len(self.instructions) - 1].op1)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target != pop_target
return False
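# Illustrative example (hypothetical gadget):
#   "pop rsi ; jmp rax" -> True (pops into rsi, which is not the branch target rax)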
def is_JOP_initializer(self):
"""
:return boolean: Returns True if the gadget is a JOP Initializer. Defined as a gadget that begins with a
"pop all" opcode, used to pop necessary values off stack en masse before redirecting to the
dispatcher.
"""
return self.instructions[0].opcode.startswith("popa")
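# Illustrative example (hypothetical gadget):
#   "popal ; jmp edx" -> True ("popal" restores the general-purpose registers en masse)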
# MASKED: is_JOP_trampoline function (lines 356-370)
def is_COP_initializer(self):
"""
:return boolean: Returns True if the gadget is a COP initializer. Defined as a gadget that begins with a
"pop all" opcode, does not use register bx/cx/dx/di as the call target, and does not clobber
bx/cx/dx or the call target in an intermediate instruction
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions)-1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
if first_instr.opcode.startswith("popa") and call_target not in [1, 2, 3, 5]: # BX, CX, DX, DI families
# Build collective list of register families to protect from being clobbered
protected_families = [1, 2, 3, call_target]
protected_registers = []
for family in protected_families:
for register in Instruction.register_families[family]:
protected_registers.append(register)
# Scan intermediate instructions to ensure they do not clobber a protected register
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Check for non-static modification of the register family
if cur_instr.op1 in protected_registers:
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return False
return True
return False
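# Illustrative example (hypothetical gadget, and assuming the SI family is not one of the
# excluded BX/CX/DX/DI family numbers):
#   "popal ; call esi" -> True (allowed call target, no intermediate clobber of a protected register)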
def is_COP_strong_trampoline(self):
"""
:return boolean: Returns True if the gadget is a COP strong trampoline. Defined as a gadget that begins with a
pop opcode and contains at least one other pop operation. The last pop (excluding "pop all"
operations) must target the call target.
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
# Only consider instructions that start with a pop
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
cnt_pops = 1
last_pop_target = first_instr.op1
# Scan intermediate instructions for pops
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("popa"):
cnt_pops += 1
if cur_instr.opcode == "pop" and "[" not in cur_instr.op1:
cnt_pops += 1
last_pop_target = cur_instr.op1
# Check that at least two pops occurred and the last pop target is the call target
if cnt_pops > 1 and last_pop_target in Instruction.register_families[call_target]:
return True
return False
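# Illustrative example (hypothetical gadget):
#   "pop rdi ; pop rax ; call rax" -> True (two pops, and the last pop fills the call register)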
def is_COP_intrastack_pivot(self):
"""
:return boolean: Returns True if the gadget is a COP Intra-stack pivot gadget. Defined as a gadget that begins
with an additive operation on the stack pointer register. Used to move around in shellcode
during COP exploits. The only restriction on the arithmetic operation is that the second operand
is not a pointer.
"""
first_instr = self.instructions[0]
if first_instr.opcode in ["inc", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
arith_target = Instruction.get_operand_register_family(first_instr.op1)
if arith_target == 7: # RSP, ESP family number
if first_instr.op2 is None or "[" not in first_instr.op2:
return True
return False
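# Illustrative example (hypothetical gadget):
#   "add esp, 0x18 ; call eax" -> True (additive, non-pointer adjustment of the stack pointer)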
def check_contains_leave(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate "leave" instruction.
"""
for i in range(1, len(self.instructions)-1):
if self.instructions[i].opcode == "leave":
self.score += 2.0
return # Only penalize gadget once
def check_sp_target_of_operation(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain
operations on the stack pointer register family.
"""
# Scan instructions to determine if they modify the stack pointer register family
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Increase score by 4 for move/exchange/lea ops, 3 for shift/rotate ops, 1 for pops, and 2 for all others
if Instruction.get_operand_register_family(cur_instr.op1) == 7: # RSP, ESP family number
if "xchg" in cur_instr.opcode or "mov" in cur_instr.opcode or cur_instr.opcode in ["lea"]:
self.score += 4.0
elif cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
elif cur_instr.opcode == "pop":
self.score += 1.0
else:
self.score += 2.0 # Will be a static modification, otherwise it would have been rejected earlier
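# Illustrative scoring (hypothetical gadget): "xchg rsp, rax ; pop rbx ; ret"
#   The xchg on the stack pointer family adds 4.0; "pop rbx" leaves it untouched, so the total is 4.0.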
def check_negative_sp_offsets(self):
"""
:return void: Increases gadget's score if its cumulative stack pointer offset is negative.
"""
sp_offset = 0
# Scan instructions to determine if they modify the stack pointer
for i in range(len(self.instructions)):
cur_instr = self.instructions[i]
if cur_instr.opcode == "push":
sp_offset -= 8
elif cur_instr.opcode == "pop" and cur_instr.op1 not in Instruction.register_families[7]:
sp_offset += 8
elif cur_instr.opcode in ["add", "adc"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset += Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode in ["sub", "sbb"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset -= Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode == "inc" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset += 1
elif cur_instr.opcode == "dec" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset -= 1
elif cur_instr.opcode.startswith("ret") and cur_instr.op1 is not None:
sp_offset += Instruction.get_operand_as_constant(cur_instr.op1)
if sp_offset < 0:
self.score += 2.0
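# Illustrative tallies (hypothetical gadgets):
#   "push rax ; pop rbx ; ret" -> -8 + 8 = 0, no penalty
#   "push rax ; ret"           -> -8, negative, so the gadget takes the +2.0 penalty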
def check_contains_conditional_op(self):
"""
:return void: Increases gadget's score if it contains conditional instructions like jumps, sets, and moves.
"""
# Scan intermediate instructions for conditional jumps, moves, sets, and compare-exchange ops
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("j") and cur_instr.opcode != "jmp":
self.score += 3.0
elif "cmov" in cur_instr.opcode or "cmpxchg" in cur_instr.opcode:
self.score += 2.0
elif "set" in cur_instr.opcode:
self.score += 1.0
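# Illustrative scoring (hypothetical gadget): "jnz 0x1000 ; cmove eax, ebx ; ret"
#   The conditional jump adds 3.0 and the conditional move adds 2.0, for a total of 5.0.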
def check_register_ops(self):
"""
:return void: Increases gadget's score if it contains operations on a value-carrying or a bystander register
"""
first_instr = self.instructions[0]
# Check if the first instruction creates a value or is an xchg operand (excluded as an edge case)
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
first_family = None
else:
# Check op1 to find the register family to protect
first_family = Instruction.get_operand_register_family(first_instr.op1)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# If the new value is a modification of the value-carrying register
if first_family is not None and first_family == Instruction.get_operand_register_family(cur_instr.op1):
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 1.5
else:
self.score += 1.0 # Will be a static modification, otherwise it would have been rejected earlier
elif "xchg" not in cur_instr.opcode and cur_instr.opcode != "pop":
# The modification is to a "bystander register". static mods +0.5, non-static +1.0
if cur_instr.op2 is not None and Instruction.get_operand_register_family(cur_instr.op2) is not None:
self.score += 1.0
else:
self.score += 0.5
def check_branch_target_of_operation(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain
operations on the indirect branch target register family.
"""
last_instr = self.instructions[len(self.instructions)-1]
target_family = Instruction.get_operand_register_family(last_instr.op1)
# Scan instructions to determine if they modify the target register family
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Increase score by 3 for shift/rotate ops, and 2 for others
if Instruction.get_operand_register_family(cur_instr.op1) == target_family:
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
else: # All other modifications to target register
self.score += 2.0
def check_memory_writes(self):
"""
:return void: Increases gadget's score if the gadget has an instruction that writes to memory.
"""
# Iterate through instructions except GPI
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Have to check both operands for xchg instructions
if "xchg" in cur_instr.opcode and ("[" in cur_instr.op1 or "[" in cur_instr.op2):
self.score += 1.0
elif cur_instr.op1 is not None and "[" in cur_instr.op1:
self.score += 1.0
|
def is_JOP_trampoline(self):
"""
:return boolean: Returns True if the gadget is a JOP trampoline. Defined as a gadget that begins with a
pop opcode to a non-memory location, and that ends in a dereference of that value. Used to
redirect execution to a value stored in memory.
"""
first_instr = self.instructions[0]
gpi_target_op = self.instructions[len(self.instructions) - 1].op1
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(gpi_target_op)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target == pop_target and "[" in gpi_target_op
return False
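# A minimal usage sketch (assumes the Gadget class defined above and ROPgadget-style
# input lines; the offsets and instruction sequences are hypothetical):
#   g = Gadget("0x0000000000401000: pop rax ; jmp qword ptr [rax]")
#   g.is_JOP_trampoline()  # True: pops rax, then branches through [rax]
#   g = Gadget("0x0000000000401005: pop rax ; jmp rax")
#   g.is_JOP_trampoline()  # False: the branch target is not dereferenced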
| 356
| 370
|
"""
Gadget class
"""
# Standard Library Imports
# Third Party Imports
# Local Imports
from static_analyzer.Instruction import Instruction
class Gadget(object):
"""
The Gadget class represents a single gadget.
"""
def __init__(self, raw_gadget):
"""
Gadget constructor
:param str raw_gadget: raw line output from ROPgadget
"""
# Parse the raw line
self.offset = raw_gadget[:raw_gadget.find(":")]
self.instruction_string = raw_gadget[raw_gadget.find(":") + 2:]
# Parse instruction objects
self.instructions = []
for instr in self.instruction_string.split(" ; "):
self.instructions.append(Instruction(instr))
# Initialize score
self.score = 0.0
def is_useless_op(self):
"""
:return boolean: Returns True if the first instruction opcode is in the "useless" list, False otherwise
Default behavior is to consider opcodes useful unless otherwise observed.
"""
first_opcode = self.instructions[0].opcode
# Bulk catch for all "jump" opcodes: No reason to include the instruction, just use the suffix directly
if first_opcode.startswith("j"):
return True
# Bulk catch for bounds checked jumps, same reason as above
if first_opcode.startswith("bnd"):
return True
# Bulk catch for all "ret" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("ret"):
return True
# Bulk catch for all "iret" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("iret"):
return True
# Bulk catch for all "call" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("call"):
return True
# Useless opcodes:
# NOP - No reason to include the instruction, just use the suffix directly
# LJMP - Same reason as "jump" opcodes above
useless = ["nop", "fnop", "ljmp"]
return first_opcode in useless
def contains_unusable_op(self):
"""
:return boolean: Returns True if any instruction opcode is unusable, False otherwise. Unusable
instructions are Ring-0 opcodes that trap in user mode and some other exceptional ops.
"""
for instr in self.instructions:
# Bulk catch for all "invalidate" opcodes: Ring-0 instructions
if instr.opcode.startswith("inv"):
return True
# Bulk catch for all "Virtual-Machine" opcodes: Ring-0 instructions
if instr.opcode.startswith("vm") and instr.opcode != "vminsd" and instr.opcode != "vminpd":
return True
# Bulk catch for all "undefined" opcodes
if instr.opcode.startswith("ud"):
return True
# Other Ring-0 opcodes and RSM, LOCK prefix
unusable = ["clts", "hlt", "lgdt", "lidt", "lldt", "lmsw", "ltr", "monitor", "mwait",
"swapgs", "sysexit", "sysreturn", "wbinvd", "wrmsr", "xsetbv", "rsm", "lock"]
if instr.opcode in unusable:
return True
# Check for ring-0 operands (control, debug, and test registers)
if instr.op1 is not None:
if instr.op1.startswith("cr") or instr.op1.startswith("tr") or instr.op1.startswith("db"):
return True
if instr.op2 is not None:
if instr.op2.startswith("cr") or instr.op2.startswith("tr") or instr.op2.startswith("db"):
return True
return False
def is_gpi_only(self):
"""
:return boolean: Returns True if the gadget is a single instruction and starts with 'ret', 'jmp', or 'call',
False otherwise
"""
if len(self.instructions) == 1:
opcode = self.instructions[0].opcode
if opcode.startswith("ret") or opcode.startswith("jmp") or opcode.startswith("call"):
return True
return False
def is_invalid_branch(self):
"""
:return boolean: Returns True if the gadget is 'jmp' or 'call' ending and the call target is a constant offset
or does not target a recognized register family. False otherwise
"""
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("call") or last_instr.opcode.startswith("jmp"):
if Instruction.get_operand_register_family(last_instr.op1) is None:
return True
return False
def has_invalid_ret_offset(self):
"""
:return boolean: Returns True if the gadget is 'ret' ending and contains a constant offset that is odd
or greater than 32 bytes, False otherwise
"""
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("ret") and last_instr.op1 is not None:
offset = Instruction.get_operand_as_constant(last_instr.op1)
if (offset % 2 != 0) or (offset > 32):
return True
return False
def clobbers_created_value(self):
"""
:return boolean: Returns True if the gadget completely overwrites the value created in the first instruction,
False otherwise.
"""
first_instr = self.instructions[0]
# Check if the first instruction creates a value or is an xchg operand (excluded as an edge case)
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
return False
# Check op1 to find the register family to protect
first_family = Instruction.get_operand_register_family(first_instr.op1)
# Most likely means first operand is a constant, exclude from analysis
if first_family is None:
return False
# Iterate through intermediate instructions, determine if it overwrites protected value (or part of it)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value() or "xchg" in cur_instr.opcode:
continue
# Check for non-static modification of the register family
if first_family == Instruction.get_operand_register_family(cur_instr.op1):
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
def creates_unusable_value(self):
"""
:return boolean: Returns True if the gadget creates a value in a segment or extension register, in a
RIP-relative location, or at a constant memory location; False otherwise.
"""
# Check if the first instruction creates a value (or may potentially set a flag)
first_instr = self.instructions[0]
if first_instr.opcode in ["cmp", "test", "push"] or first_instr.op1 is None:
return False
# Check if first operand is not a constant and it does not belong to a recognized register family
if not Instruction.is_constant(first_instr.op1) and \
Instruction.get_operand_register_family(first_instr.op1) is None:
return True
return False
def contains_intermediate_GPI(self):
"""
:return boolean: Returns True if the gadget's intermediate instructions contain a GPI (or a generic interrupt),
False otherwise.
"""
for i in range(len(self.instructions)-1):
cur_opcode = self.instructions[i].opcode
cur_target = self.instructions[i].op1
if cur_opcode.startswith("ret") or \
cur_opcode == "syscall" or cur_opcode == "sysenter" or cur_opcode.startswith("int") or \
("jmp" in cur_opcode and not Instruction.is_constant(cur_target)) or \
("call" in cur_opcode and not Instruction.is_constant(cur_target)):
return True
return False
def clobbers_stack_pointer(self):
"""
:return boolean: Returns True if the ROP gadget's instructions assign a non-static value to the stack pointer
register, False otherwise.
"""
# Only check ROP gadgets
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("ret"):
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Check for non-static modification of the stack pointer register family
if Instruction.get_operand_register_family(cur_instr.op1) == 7: # RSP, ESP family number
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "pop"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
def clobbers_indirect_target(self):
"""
:return boolean: Returns True if the JOP/COP gadget's instructions modify the indirect branch register in
certain ways, False otherwise.
"""
# Get the register family of the indirect jump / call
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
family = Instruction.get_operand_register_family(last_instr.op1)
# Check each instruction to see if it clobbers the value
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# First check if the instruction modifies the target
if cur_instr.op1 in Instruction.register_families[family]:
# Does the instruction zero out the target?
if cur_instr.opcode == "xor" and cur_instr.op1 == cur_instr.op2:
return True
# Does the instruction perform a RIP-relative LEA into the target?
if cur_instr.opcode == "lea" and ("rip" in cur_instr.op2 or "eip" in cur_instr.op2):
return True
# Does the instruction load a string or a value of an input port into the target?
if cur_instr.opcode.startswith("lods") or cur_instr.opcode == "in":
return True
# Does the instruction overwrite the target with a static value or segment register value?
if "mov" in cur_instr.opcode and (Instruction.is_constant(cur_instr.op2) or
Instruction.get_operand_register_family(cur_instr.op2) is None):
return True
return False
def has_invalid_int_handler(self):
"""
:return boolean: Returns True if the gadget ends in a software interrupt whose handler is not 0x80 (the
Linux system call handler), False otherwise.
"""
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("int") and last_instr.op1 != "0x80":
return True
return False
def is_rip_relative_indirect_branch(self):
"""
:return boolean: Returns True if the gadget is a JOP/COP gadget relying on a RIP relative indirect branch,
False otherwise.
"""
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
if "rip" in last_instr.op1 or "eip" in last_instr.op1:
return True
return False
def contains_static_call(self):
"""
:return boolean: Returns True if any intermediate instruction is a call to a constant target, False otherwise.
"""
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("call") and Instruction.is_constant(cur_instr.op1):
return True
return False
def is_equal(self, rhs):
"""
:return boolean: Returns True if the gadgets are an exact match, including offset. Used for gadget locality.
"""
return self.offset == rhs.offset and self.instruction_string == rhs.instruction_string
def is_duplicate(self, rhs):
"""
:return boolean: Returns True if the gadgets are a semantic match. Used for non-locality gadget metrics.
Semantic match is defined as the exact same sequence of equivalent instructions.
"""
if len(self.instructions) != len(rhs.instructions):
return False
for i in range(len(self.instructions)):
if not self.instructions[i].is_equivalent(rhs.instructions[i]):
return False
return True
def is_JOP_COP_dispatcher(self):
"""
:return boolean: Returns True if the gadget is a JOP or COP dispatcher. Defined as a gadget that begins with an
arithmetic operation on a register and ends with a branch to a dereference of that register. Used
to iterate through instructions in the payload. The only restriction on the arithmetic operation is
that it doesn't use the same register as both operands.
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
# Only consider gadgets that end in dereference of a register and start with opcodes of interest
if "[" in last_instr.op1 and \
first_instr.opcode in ["inc", "dec", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(last_instr.op1)
arith_target_1 = Instruction.get_operand_register_family(first_instr.op1)
# Secondary check: if the second op is a constant ensure it is in range [1, 32]
if Instruction.is_constant(first_instr.op2):
additive_value = Instruction.get_operand_as_constant(first_instr.op2)
if additive_value < 1 or additive_value > 32:
return False
arith_target_2 = Instruction.get_operand_register_family(first_instr.op2)
return gpi_target == arith_target_1 and arith_target_1 != arith_target_2
return False
def is_JOP_COP_dataloader(self):
"""
:return boolean: Returns True if the gadget is a JOP or COP data loader. Defined as a gadget that begins with a
pop opcode to a non-memory location that is also not the target of the GPI. Used to pop a
necessary value off the stack before redirecting to the dispatcher.
"""
first_instr = self.instructions[0]
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(self.instructions[len(self.instructions) - 1].op1)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target != pop_target
return False
def is_JOP_initializer(self):
"""
:return boolean: Returns True if the gadget is a JOP Initializer. Defined as a gadget that begins with a
"pop all" opcode, used to pop necessary values off stack en masse before redirecting to the
dispatcher.
"""
return self.instructions[0].opcode.startswith("popa")
def is_JOP_trampoline(self):
"""
:return boolean: Returns True if the gadget is a JOP trampoline. Defined as a gadget that begins with a
pop opcode to a non-memory location, and that ends in a dereference of that value. Used to
redirect execution to a value stored in memory.
"""
first_instr = self.instructions[0]
gpi_target_op = self.instructions[len(self.instructions) - 1].op1
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(gpi_target_op)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target == pop_target and "[" in gpi_target_op
return False
def is_COP_initializer(self):
"""
:return boolean: Returns True if the gadget is a COP initializer. Defined as a gadget that begins with a
"pop all" opcode, does not use register bx/cx/dx/di as the call target, and does not clobber
bx/cx/dx or the call target in an intermediate instruction
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions)-1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
if first_instr.opcode.startswith("popa") and call_target not in [1, 2, 3, 5]: # BX, CX, DX, DI families
# Build collective list of register families to protect from being clobbered
protected_families = [1, 2, 3, call_target]
protected_registers = []
for family in protected_families:
for register in Instruction.register_families[family]:
protected_registers.append(register)
# Scan intermediate instructions to ensure they do not clobber a protected register
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Check for non-static modification of the register family
if cur_instr.op1 in protected_registers:
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return False
return True
return False
def is_COP_strong_trampoline(self):
"""
:return boolean: Returns True if the gadget is a COP strong trampoline. Defined as a gadget that begins with a
pop opcode and contains at least one other pop operation. The last pop (excluding "pop all"
operations) must target the call target.
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
# Only consider instructions that start with a pop
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
cnt_pops = 1
last_pop_target = first_instr.op1
# Scan intermediate instructions for pops
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("popa"):
cnt_pops += 1
if cur_instr.opcode == "pop" and "[" not in cur_instr.op1:
cnt_pops += 1
last_pop_target = cur_instr.op1
# Check that at least two pops occurred and the last pop target is the call target
if cnt_pops > 1 and last_pop_target in Instruction.register_families[call_target]:
return True
return False
def is_COP_intrastack_pivot(self):
"""
:return boolean: Returns True if the gadget is a COP Intra-stack pivot gadget. Defined as a gadget that begins
with an additive operation on the stack pointer register. Used to move around in shellcode
during COP exploits. The only restriction on the arithmetic operation is that the second operand
is not a pointer.
"""
first_instr = self.instructions[0]
if first_instr.opcode in ["inc", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
arith_target = Instruction.get_operand_register_family(first_instr.op1)
if arith_target == 7: # RSP, ESP family number
if first_instr.op2 is None or "[" not in first_instr.op2:
return True
return False
def check_contains_leave(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate "leave" instruction.
"""
for i in range(1, len(self.instructions)-1):
if self.instructions[i].opcode == "leave":
self.score += 2.0
return # Only penalize gadget once
def check_sp_target_of_operation(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain
operations on the stack pointer register family.
"""
# Scan instructions to determine if they modify the stack pointer register family
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Increase score by 4 for move/exchange/lea ops, 3 for shift/rotate ops, 1 for pops, and 2 for all others
if Instruction.get_operand_register_family(cur_instr.op1) == 7: # RSP, ESP family number
if "xchg" in cur_instr.opcode or "mov" in cur_instr.opcode or cur_instr.opcode in ["lea"]:
self.score += 4.0
elif cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
elif cur_instr.opcode == "pop":
self.score += 1.0
else:
self.score += 2.0 # Will be a static modification, otherwise it would have been rejected earlier
def check_negative_sp_offsets(self):
"""
:return void: Increases gadget's score if its cumulative stack pointer offset is negative.
"""
sp_offset = 0
# Scan instructions to determine if they modify the stack pointer
for i in range(len(self.instructions)):
cur_instr = self.instructions[i]
if cur_instr.opcode == "push":
sp_offset -= 8
elif cur_instr.opcode == "pop" and cur_instr.op1 not in Instruction.register_families[7]:
sp_offset += 8
elif cur_instr.opcode in ["add", "adc"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset += Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode in ["sub", "sbb"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset -= Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode == "inc" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset += 1
elif cur_instr.opcode == "dec" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset -= 1
elif cur_instr.opcode.startswith("ret") and cur_instr.op1 is not None:
sp_offset += Instruction.get_operand_as_constant(cur_instr.op1)
if sp_offset < 0:
self.score += 2.0
def check_contains_conditional_op(self):
"""
:return void: Increases gadget's score if it contains conditional instructions like jumps, sets, and moves.
"""
# Scan intermediate instructions for conditional jumps, moves, sets, and compare-exchange ops
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("j") and cur_instr.opcode != "jmp":
self.score += 3.0
elif "cmov" in cur_instr.opcode or "cmpxchg" in cur_instr.opcode:
self.score += 2.0
elif "set" in cur_instr.opcode:
self.score += 1.0
def check_register_ops(self):
"""
:return void: Increases gadget's score if it contains operations on a value-carrying or a bystander register
"""
first_instr = self.instructions[0]
# Check if the first instruction creates a value or is an xchg operand (excluded as an edge case)
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
first_family = None
else:
# Check op1 to find the register family to protect
first_family = Instruction.get_operand_register_family(first_instr.op1)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# If the new value is a modification of the value-carrying register
if first_family is not None and first_family == Instruction.get_operand_register_family(cur_instr.op1):
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 1.5
else:
self.score += 1.0 # Will be a static modification, otherwise it would have been rejected earlier
elif "xchg" not in cur_instr.opcode and cur_instr.opcode != "pop":
# The modification is to a "bystander register". static mods +0.5, non-static +1.0
if cur_instr.op2 is not None and Instruction.get_operand_register_family(cur_instr.op2) is not None:
self.score += 1.0
else:
self.score += 0.5
def check_branch_target_of_operation(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain
operations on the indirect branch target register family.
"""
last_instr = self.instructions[len(self.instructions)-1]
target_family = Instruction.get_operand_register_family(last_instr.op1)
# Scan instructions to determine if they modify the target register family
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Increase score by 3 for shift/rotate ops, and 2 for others
if Instruction.get_operand_register_family(cur_instr.op1) == target_family:
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
else: # All other modifications to target register
self.score += 2.0
def check_memory_writes(self):
"""
:return void: Increases gadget's score if the gadget has an instruction that writes to memory.
"""
# Iterate through instructions except GPI
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Have to check both operands for xchg instructions
if "xchg" in cur_instr.opcode and ("[" in cur_instr.op1 or "[" in cur_instr.op2):
self.score += 1.0
elif cur_instr.op1 is not None and "[" in cur_instr.op1:
self.score += 1.0
|
is_COP_strong_trampoline
|
:return boolean: Returns True if the gadget is a COP strong trampoline. Defined as a gadget that begins with a
pop opcode and contains at least one other pop operation. The last pop (excluding "pop all"
operations) must target the call target.
|
"""
Gadget class
"""
# Standard Library Imports
# Third Party Imports
# Local Imports
from static_analyzer.Instruction import Instruction
class Gadget(object):
"""
The Gadget class represents a single gadget.
"""
def __init__(self, raw_gadget):
"""
Gadget constructor
:param str raw_gadget: raw line output from ROPgadget
"""
# Parse the raw line
self.offset = raw_gadget[:raw_gadget.find(":")]
self.instruction_string = raw_gadget[raw_gadget.find(":") + 2:]
# Parse instruction objects
self.instructions = []
for instr in self.instruction_string.split(" ; "):
self.instructions.append(Instruction(instr))
# Initialize score
self.score = 0.0
def is_useless_op(self):
"""
:return boolean: Returns True if the first instruction opcode is in the "useless" list, False otherwise
Default behavior is to consider opcodes useful unless otherwise observed.
"""
first_opcode = self.instructions[0].opcode
# Bulk catch for all "jump" opcodes: No reason to include the instruction, just use the suffix directly
if first_opcode.startswith("j"):
return True
# Bulk catch for bounds checked jumps, same reason as above
if first_opcode.startswith("bnd"):
return True
# Bulk catch for all "ret" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("ret"):
return True
# Bulk catch for all "iret" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("iret"):
return True
# Bulk catch for all "call" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("call"):
return True
# Useless opcodes:
# NOP - No reason to include the instruction, just use the suffix directly
# LJMP - Same reason as "jump" opcodes above
useless = ["nop", "fnop", "ljmp"]
return first_opcode in useless
def contains_unusable_op(self):
"""
:return boolean: Returns True if any instruction opcode is unusable, False otherwise. Unusable
instructions are Ring-0 opcodes that trap in user mode and some other exceptional ops.
"""
for instr in self.instructions:
# Bulk catch for all "invalidate" opcodes: Ring-0 instructions
if instr.opcode.startswith("inv"):
return True
# Bulk catch for all "Virtual-Machine" opcodes: Ring-0 instructions
if instr.opcode.startswith("vm") and instr.opcode != "vminsd" and instr.opcode != "vminpd":
return True
# Bulk catch for all "undefined" opcodes
if instr.opcode.startswith("ud"):
return True
# Other Ring-0 opcodes and RSM, LOCK prefix
unusable = ["clts", "hlt", "lgdt", "lidt", "lldt", "lmsw", "ltr", "monitor", "mwait",
"swapgs", "sysexit", "sysreturn", "wbinvd", "wrmsr", "xsetbv", "rsm", "lock"]
if instr.opcode in unusable:
return True
# Check for ring-0 operands (control, debug, and test registers)
if instr.op1 is not None:
if instr.op1.startswith("cr") or instr.op1.startswith("tr") or instr.op1.startswith("db"):
return True
if instr.op2 is not None:
if instr.op2.startswith("cr") or instr.op2.startswith("tr") or instr.op2.startswith("db"):
return True
return False
def is_gpi_only(self):
"""
:return boolean: Returns True if the gadget is a single instruction and starts with 'ret', 'jmp', or 'call',
False otherwise
"""
if len(self.instructions) == 1:
opcode = self.instructions[0].opcode
if opcode.startswith("ret") or opcode.startswith("jmp") or opcode.startswith("call"):
return True
return False
def is_invalid_branch(self):
"""
:return boolean: Returns True if the gadget is 'jmp' or 'call' ending and the call target is a constant offset
or does not target a recognized register family. False otherwise
"""
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("call") or last_instr.opcode.startswith("jmp"):
if Instruction.get_operand_register_family(last_instr.op1) is None:
return True
return False
def has_invalid_ret_offset(self):
"""
:return boolean: Returns True if the gadget is 'ret' ending and contains a constant offset that is odd
or greater than 32 bytes, False otherwise
"""
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("ret") and last_instr.op1 is not None:
offset = Instruction.get_operand_as_constant(last_instr.op1)
if (offset % 2 != 0) or (offset > 32):
return True
return False
def clobbers_created_value(self):
"""
:return boolean: Returns True if the gadget completely overwrites the value created in the first instruction,
False otherwise.
"""
first_instr = self.instructions[0]
# Check if the first instruction creates a value or is an xchg operand (excluded as an edge case)
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
return False
# Check op1 to find the register family to protect
first_family = Instruction.get_operand_register_family(first_instr.op1)
# Most likely means first operand is a constant, exclude from analysis
if first_family is None:
return False
# Iterate through intermediate instructions, determine if it overwrites protected value (or part of it)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value() or "xchg" in cur_instr.opcode:
continue
# Check for non-static modification of the register family
if first_family == Instruction.get_operand_register_family(cur_instr.op1):
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
def creates_unusable_value(self):
"""
:return boolean: Returns True if the gadget creates a value in a segment or extension register, in a
RIP-relative location, or at a constant memory location; False otherwise.
"""
# Check if the first instruction creates a value (or may potentially set a flag)
first_instr = self.instructions[0]
if first_instr.opcode in ["cmp", "test", "push"] or first_instr.op1 is None:
return False
# Check if first operand is not a constant and it does not belong to a recognized register family
if not Instruction.is_constant(first_instr.op1) and \
Instruction.get_operand_register_family(first_instr.op1) is None:
return True
return False
def contains_intermediate_GPI(self):
"""
:return boolean: Returns True if the gadget's intermediate instructions contain a GPI (or a generic interrupt),
False otherwise.
"""
for i in range(len(self.instructions)-1):
cur_opcode = self.instructions[i].opcode
cur_target = self.instructions[i].op1
if cur_opcode.startswith("ret") or \
cur_opcode == "syscall" or cur_opcode == "sysenter" or cur_opcode.startswith("int") or \
("jmp" in cur_opcode and not Instruction.is_constant(cur_target)) or \
("call" in cur_opcode and not Instruction.is_constant(cur_target)):
return True
return False
def clobbers_stack_pointer(self):
"""
:return boolean: Returns True if the ROP gadget's instructions assign a non-static value to the stack pointer
register, False otherwise.
"""
# Only check ROP gadgets
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("ret"):
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Check for non-static modification of the stack pointer register family
if Instruction.get_operand_register_family(cur_instr.op1) == 7: # RSP, ESP family number
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "pop"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
def clobbers_indirect_target(self):
"""
:return boolean: Returns True if the JOP/COP gadget's instructions modify the indirect branch register in
certain ways, False otherwise.
"""
# Get the register family of the indirect jump / call
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
family = Instruction.get_operand_register_family(last_instr.op1)
# Check each instruction to see if it clobbers the value
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# First check if the instruction modifies the target
if cur_instr.op1 in Instruction.register_families[family]:
# Does the instruction zero out the target?
if cur_instr.opcode == "xor" and cur_instr.op1 == cur_instr.op2:
return True
# Does the instruction perform a RIP-relative LEA into the target?
if cur_instr.opcode == "lea" and ("rip" in cur_instr.op2 or "eip" in cur_instr.op2):
return True
# Does the instruction load a string or a value of an input port into the target?
if cur_instr.opcode.startswith("lods") or cur_instr.opcode == "in":
return True
# Does the instruction overwrite the target with a static value or segment register value?
if "mov" in cur_instr.opcode and (Instruction.is_constant(cur_instr.op2) or
Instruction.get_operand_register_family(cur_instr.op2) is None):
return True
return False
def has_invalid_int_handler(self):
"""
:return boolean: Returns True if the gadget ends in a software interrupt whose handler is not 0x80 (the
Linux system call handler), False otherwise.
"""
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("int") and last_instr.op1 != "0x80":
return True
return False
def is_rip_relative_indirect_branch(self):
"""
:return boolean: Returns True if the gadget is a JOP/COP gadget relying on a RIP relative indirect branch,
False otherwise.
"""
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
if "rip" in last_instr.op1 or "eip" in last_instr.op1:
return True
return False
def contains_static_call(self):
"""
:return boolean: Returns True if any intermediate instruction is a call to a constant target, False otherwise.
"""
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("call") and Instruction.is_constant(cur_instr.op1):
return True
return False
def is_equal(self, rhs):
"""
:return boolean: Returns True if the gadgets are an exact match, including offset. Used for gadget locality.
"""
return self.offset == rhs.offset and self.instruction_string == rhs.instruction_string
def is_duplicate(self, rhs):
"""
:return boolean: Returns True if the gadgets are a semantic match. Used for non-locality gadget metrics.
Semantic match is defined as the exact same sequence of equivalent instructions.
"""
if len(self.instructions) != len(rhs.instructions):
return False
for i in range(len(self.instructions)):
if not self.instructions[i].is_equivalent(rhs.instructions[i]):
return False
return True
def is_JOP_COP_dispatcher(self):
"""
:return boolean: Returns True if the gadget is a JOP or COP dispatcher. Defined as a gadget that begins with an
arithmetic operation on a register and ends with a branch to a dereference of that register. Used
to iterate through instructions in the payload. The only restriction on the arithmetic operation is
that it doesn't use the same register as both operands.
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
# Only consider gadgets that end in dereference of a register and start with opcodes of interest
if "[" in last_instr.op1 and \
first_instr.opcode in ["inc", "dec", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(last_instr.op1)
arith_target_1 = Instruction.get_operand_register_family(first_instr.op1)
# Secondary check: if the second op is a constant ensure it is in range [1, 32]
if Instruction.is_constant(first_instr.op2):
additive_value = Instruction.get_operand_as_constant(first_instr.op2)
if additive_value < 1 or additive_value > 32:
return False
arith_target_2 = Instruction.get_operand_register_family(first_instr.op2)
return gpi_target == arith_target_1 and arith_target_1 != arith_target_2
return False
def is_JOP_COP_dataloader(self):
"""
:return boolean: Returns True if the gadget is a JOP or COP data loader. Defined as a gadget that begins with a
pop opcode to a non-memory location that is also not the target of the GPI. Used to pop a
necessary value off the stack before redirecting to the dispatcher.
"""
first_instr = self.instructions[0]
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(self.instructions[len(self.instructions) - 1].op1)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target != pop_target
return False
def is_JOP_initializer(self):
"""
:return boolean: Returns True if the gadget is a JOP Initializer. Defined as a gadget that begins with a
"pop all" opcode, used to pop necessary values off stack en masse before redirecting to the
dispatcher.
"""
return self.instructions[0].opcode.startswith("popa")
def is_JOP_trampoline(self):
"""
:return boolean: Returns True if the gadget is a JOP trampoline. Defined as a gadget that begins with a
pop opcode to a non-memory location, and that ends in a dereference of that value. Used to
redirect execution to a value stored in memory.
"""
first_instr = self.instructions[0]
gpi_target_op = self.instructions[len(self.instructions) - 1].op1
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(gpi_target_op)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target == pop_target and "[" in gpi_target_op
return False
def is_COP_initializer(self):
"""
:return boolean: Returns True if the gadget is a COP initializer. Defined as a gadget that begins with a
"pop all" opcode, does not use register bx/cx/dx/di as the call target, and does not clobber
bx/cx/dx or the call target in an intermediate instruction
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions)-1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
if first_instr.opcode.startswith("popa") and call_target not in [1, 2, 3, 5]: # BX, CX, DX, DI families
# Build collective list of register families to protect from being clobbered
protected_families = [1, 2, 3, call_target]
protected_registers = []
for family in protected_families:
for register in Instruction.register_families[family]:
protected_registers.append(register)
# Scan intermediate instructions to ensure they do not clobber a protected register
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Check for non-static modification of the register family
if cur_instr.op1 in protected_registers:
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return False
return True
return False
# MASKED: is_COP_strong_trampoline function (lines 407-437)
def is_COP_intrastack_pivot(self):
"""
:return boolean: Returns True if the gadget is a COP Intra-stack pivot gadget. Defined as a gadget that begins
with an additive operation on the stack pointer register. Used to move around in shellcode
during COP exploits. The only restriction on the arithmetic operation is that the second operand
is not a pointer.
"""
first_instr = self.instructions[0]
if first_instr.opcode in ["inc", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
arith_target = Instruction.get_operand_register_family(first_instr.op1)
if arith_target == 7: # RSP, ESP family number
if first_instr.op2 is None or "[" not in first_instr.op2:
return True
return False
def check_contains_leave(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate "leave" instruction.
"""
for i in range(1, len(self.instructions)-1):
if self.instructions[i].opcode == "leave":
self.score += 2.0
return # Only penalize gadget once
def check_sp_target_of_operation(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain
operations on the stack pointer register family.
"""
# Scan instructions to determine if they modify the stack pointer register family
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
            # Increase score by 4 for move, load address, and exchange ops, 3 for shift/rotate ops, 1 for pops, and 2 for others
if Instruction.get_operand_register_family(cur_instr.op1) == 7: # RSP, ESP family number
if "xchg" in cur_instr.opcode or "mov" in cur_instr.opcode or cur_instr.opcode in ["lea"]:
self.score += 4.0
elif cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
elif cur_instr.opcode == "pop":
self.score += 1.0
else:
self.score += 2.0 # Will be a static modification, otherwise it would have been rejected earlier
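    # Scoring example (hypothetical gadget, assuming xchg is treated as value-creating):
    # "xchg rsp, rax ; ret" targets the stack pointer family with an exchange and adds
    # 4.0, while a leading "pop rsp" would add only 1.0 under the branch above.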
def check_negative_sp_offsets(self):
"""
        :return void: Increases gadget's score if its cumulative stack pointer offset is negative.
"""
sp_offset = 0
# Scan instructions to determine if they modify the stack pointer
for i in range(len(self.instructions)):
cur_instr = self.instructions[i]
if cur_instr.opcode == "push":
sp_offset -= 8
elif cur_instr.opcode == "pop" and cur_instr.op1 not in Instruction.register_families[7]:
sp_offset += 8
elif cur_instr.opcode in ["add", "adc"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset += Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode in ["sub", "sbb"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset -= Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode == "inc" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset += 1
elif cur_instr.opcode == "dec" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset -= 1
elif cur_instr.opcode.startswith("ret") and cur_instr.op1 is not None:
sp_offset += Instruction.get_operand_as_constant(cur_instr.op1)
if sp_offset < 0:
self.score += 2.0
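    # Worked example (hypothetical gadget): "push rax ; pop rbx ; ret" nets
    # -8 (push) + 8 (pop to a non-SP register) + 0 (bare ret) = 0, so no score is
    # added; "push rax ; ret" nets -8 and adds 2.0.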
def check_contains_conditional_op(self):
"""
:return void: Increases gadget's score if it contains conditional instructions like jumps, sets, and moves.
"""
        # Scan intermediate instructions for conditional jumps, moves, sets, and compare-exchanges
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("j") and cur_instr.opcode != "jmp":
self.score += 3.0
elif "cmov" in cur_instr.opcode or "cmpxchg" in cur_instr.opcode:
self.score += 2.0
elif "set" in cur_instr.opcode:
self.score += 1.0
def check_register_ops(self):
"""
        :return void: Increases gadget's score if it contains operations on a value-carrying or a bystander register.
"""
first_instr = self.instructions[0]
# Check if the first instruction creates a value or is an xchg operand (excluded as an edge case)
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
first_family = None
else:
# Check op1 to find the register family to protect
first_family = Instruction.get_operand_register_family(first_instr.op1)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# If the new value is a modification of the value-carrying register
if first_family is not None and first_family == Instruction.get_operand_register_family(cur_instr.op1):
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 1.5
else:
self.score += 1.0 # Will be a static modification, otherwise it would have been rejected earlier
elif "xchg" not in cur_instr.opcode and cur_instr.opcode != "pop":
# The modification is to a "bystander register". static mods +0.5, non-static +1.0
if cur_instr.op2 is not None and Instruction.get_operand_register_family(cur_instr.op2) is not None:
self.score += 1.0
else:
self.score += 0.5
def check_branch_target_of_operation(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain
operations on the indirect branch target register family.
"""
last_instr = self.instructions[len(self.instructions)-1]
target_family = Instruction.get_operand_register_family(last_instr.op1)
# Scan instructions to determine if they modify the target register family
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Increase score by 3 for shift/rotate ops, and 2 for others
if Instruction.get_operand_register_family(cur_instr.op1) == target_family:
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
else: # All other modifications to target register
self.score += 2.0
def check_memory_writes(self):
"""
:return void: Increases gadget's score if the gadget has an instruction that writes to memory.
"""
# Iterate through instructions except GPI
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
            # Have to check both operands for xchg instructions
if "xchg" in cur_instr.opcode and ("[" in cur_instr.op1 or "[" in cur_instr.op2):
self.score += 1.0
elif cur_instr.op1 is not None and "[" in cur_instr.op1:
self.score += 1.0
|
def is_COP_strong_trampoline(self):
"""
        :return boolean: Returns True if the gadget is a COP strong trampoline. Defined as a gadget that begins with a
                         pop opcode and contains at least one other pop operation. The last single-register pop (i.e.,
                         not a "pop all") must target the call target.
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
# Only consider instructions that start with a pop
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
cnt_pops = 1
last_pop_target = first_instr.op1
# Scan intermediate instructions for pops
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("popa"):
cnt_pops += 1
if cur_instr.opcode == "pop" and "[" not in cur_instr.op1:
cnt_pops += 1
last_pop_target = cur_instr.op1
# Check that at least two pops occurred and the last pop target is the call target
if cnt_pops > 1 and last_pop_target in Instruction.register_families[call_target]:
return True
return False
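    # Example strong trampoline (hypothetical gadget, assuming register_families
    # groups rbx with the call target's family): "pop rax ; pop rbx ; call rbx"
    # qualifies, since two pops occur and the last pop target (rbx) is also the
    # call target.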
| 407
| 437
|
"""
Gadget class
"""
# Standard Library Imports
# Third Party Imports
# Local Imports
from static_analyzer.Instruction import Instruction
class Gadget(object):
"""
The Gadget class represents a single gadget.
"""
def __init__(self, raw_gadget):
"""
Gadget constructor
:param str raw_gadget: raw line output from ROPgadget
"""
# Parse the raw line
self.offset = raw_gadget[:raw_gadget.find(":")]
self.instruction_string = raw_gadget[raw_gadget.find(":") + 2:]
# Parse instruction objects
self.instructions = []
for instr in self.instruction_string.split(" ; "):
self.instructions.append(Instruction(instr))
# Initialize score
self.score = 0.0
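    # Illustrative sketch (added for clarity, assuming ROPgadget's usual
    # "<offset>: <instr> ; <instr> ; ..." line format): a raw line such as
    #   "0x4011a3: pop rdi ; ret"
    # parses to offset "0x4011a3" and instruction_string "pop rdi ; ret",
    # which splits on " ; " into two Instruction objects.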
def is_useless_op(self):
"""
:return boolean: Returns True if the first instruction opcode is in the "useless" list, False otherwise
Default behavior is to consider opcodes useful unless otherwise observed.
"""
first_opcode = self.instructions[0].opcode
# Bulk catch for all "jump" opcodes: No reason to include the instruction, just use the suffix directly
if first_opcode.startswith("j"):
return True
# Bulk catch for bounds checked jumps, same reason as above
if first_opcode.startswith("bnd"):
return True
# Bulk catch for all "ret" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("ret"):
return True
# Bulk catch for all "iret" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("iret"):
return True
# Bulk catch for all "call" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("call"):
return True
# Useless opcodes:
# NOP - No reason to include the instruction, just use the suffix directly
# LJMP - Same reason as "jump" opcodes above
useless = ["nop", "fnop", "ljmp"]
return first_opcode in useless
def contains_unusable_op(self):
"""
:return boolean: Returns True if any instruction opcode is unusable. False otherwise
                         Unusable instructions are Ring-0 opcodes that trap in user mode and some other exceptional ops.
"""
for instr in self.instructions:
# Bulk catch for all "invalidate" opcodes: Ring-0 instructions
if instr.opcode.startswith("inv"):
return True
# Bulk catch for all "Virtual-Machine" opcodes: Ring-0 instructions
if instr.opcode.startswith("vm") and instr.opcode != "vminsd" and instr.opcode != "vminpd":
return True
# Bulk catch for all "undefined" opcodes
if instr.opcode.startswith("ud"):
return True
# Other Ring-0 opcodes and RSM, LOCK prefix
unusable = ["clts", "hlt", "lgdt", "lidt", "lldt", "lmsw", "ltr", "monitor", "mwait",
"swapgs", "sysexit", "sysreturn", "wbinvd", "wrmsr", "xsetbv", "rsm", "lock"]
if instr.opcode in unusable:
return True
# Check for ring-0 operands (control, debug, and test registers)
if instr.op1 is not None:
if instr.op1.startswith("cr") or instr.op1.startswith("tr") or instr.op1.startswith("db"):
return True
if instr.op2 is not None:
if instr.op2.startswith("cr") or instr.op2.startswith("tr") or instr.op2.startswith("db"):
return True
return False
def is_gpi_only(self):
"""
:return boolean: Returns True if the gadget is a single instruction and starts with 'ret', 'jmp', or 'call',
False otherwise
"""
if len(self.instructions) == 1:
opcode = self.instructions[0].opcode
if opcode.startswith("ret") or opcode.startswith("jmp") or opcode.startswith("call"):
return True
return False
def is_invalid_branch(self):
"""
:return boolean: Returns True if the gadget is 'jmp' or 'call' ending and the call target is a constant offset
or does not target a recognized register family. False otherwise
"""
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("call") or last_instr.opcode.startswith("jmp"):
if Instruction.get_operand_register_family(last_instr.op1) is None:
return True
return False
def has_invalid_ret_offset(self):
"""
        :return boolean: Returns True if the gadget is 'ret' ending and contains a constant offset that is odd (not
                         2-byte aligned) or is greater than 32 bytes, False otherwise
"""
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("ret") and last_instr.op1 is not None:
offset = Instruction.get_operand_as_constant(last_instr.op1)
if (offset % 2 != 0) or (offset > 32):
return True
return False
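    # Worked examples for the check above (hypothetical gadgets, assuming
    # get_operand_as_constant parses hex literals): "ret 0x10" (16: even and <= 32)
    # passes, while "ret 0xb" (11: odd) and "ret 0x28" (40: > 32) are flagged as invalid.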
def clobbers_created_value(self):
"""
:return boolean: Returns True if the gadget completely overwrites the value created in the first instruction,
False otherwise.
"""
first_instr = self.instructions[0]
# Check if the first instruction creates a value or is an xchg operand (excluded as an edge case)
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
return False
# Check op1 to find the register family to protect
first_family = Instruction.get_operand_register_family(first_instr.op1)
# Most likely means first operand is a constant, exclude from analysis
if first_family is None:
return False
# Iterate through intermediate instructions, determine if it overwrites protected value (or part of it)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value() or "xchg" in cur_instr.opcode:
continue
# Check for non-static modification of the register family
if first_family == Instruction.get_operand_register_family(cur_instr.op1):
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
def creates_unusable_value(self):
"""
        :return boolean: Returns True if the gadget creates a value in a segment or extension register, is
                         RIP-relative, or targets a constant memory location; False otherwise.
"""
        # Check if the first instruction creates a value (or may potentially set a flag)
first_instr = self.instructions[0]
if first_instr.opcode in ["cmp", "test", "push"] or first_instr.op1 is None:
return False
# Check if first operand is not a constant and it does not belong to a recognized register family
if not Instruction.is_constant(first_instr.op1) and \
Instruction.get_operand_register_family(first_instr.op1) is None:
return True
return False
def contains_intermediate_GPI(self):
"""
:return boolean: Returns True if the gadget's intermediate instructions contain a GPI (or a generic interrupt),
False otherwise.
"""
for i in range(len(self.instructions)-1):
cur_opcode = self.instructions[i].opcode
cur_target = self.instructions[i].op1
if cur_opcode.startswith("ret") or \
cur_opcode == "syscall" or cur_opcode == "sysenter" or cur_opcode.startswith("int") or \
("jmp" in cur_opcode and not Instruction.is_constant(cur_target)) or \
("call" in cur_opcode and not Instruction.is_constant(cur_target)):
return True
return False
def clobbers_stack_pointer(self):
"""
:return boolean: Returns True if the ROP gadget's instructions assign a non-static value to the stack pointer
register, False otherwise.
"""
# Only check ROP gadgets
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("ret"):
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Check for non-static modification of the stack pointer register family
if Instruction.get_operand_register_family(cur_instr.op1) == 7: # RSP, ESP family number
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "pop"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
def clobbers_indirect_target(self):
"""
:return boolean: Returns True if the JOP/COP gadget's instructions modify the indirect branch register in
certain ways, False otherwise.
"""
# Get the register family of the indirect jump / call
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
family = Instruction.get_operand_register_family(last_instr.op1)
# Check each instruction to see if it clobbers the value
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# First check if the instruction modifies the target
if cur_instr.op1 in Instruction.register_families[family]:
# Does the instruction zeroize out the target?
if cur_instr.opcode == "xor" and cur_instr.op1 == cur_instr.op2:
return True
# Does the instruction perform a RIP-relative LEA into the target?
if cur_instr.opcode == "lea" and ("rip" in cur_instr.op2 or "eip" in cur_instr.op2):
return True
# Does the instruction load a string or a value of an input port into the target?
if cur_instr.opcode.startswith("lods") or cur_instr.opcode == "in":
return True
# Does the instruction overwrite the target with a static value or segment register value?
if "mov" in cur_instr.opcode and (Instruction.is_constant(cur_instr.op2) or
Instruction.get_operand_register_family(cur_instr.op2) is None):
return True
return False
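    # Example of a clobbered branch target (hypothetical gadget): in
    # "xor rax, rax ; jmp rax" the xor zeroizes rax, the branch target's own
    # register, so this method would return True.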
def has_invalid_int_handler(self):
"""
        :return boolean: Returns True if the gadget ends in a software interrupt whose operand is not 0x80 (the
                         Linux system call handler), False otherwise.
"""
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("int") and last_instr.op1 != "0x80":
return True
return False
def is_rip_relative_indirect_branch(self):
"""
:return boolean: Returns True if the gadget is a JOP/COP gadget relying on a RIP relative indirect branch,
False otherwise.
"""
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
if "rip" in last_instr.op1 or "eip" in last_instr.op1:
return True
return False
    def contains_static_call(self):
        """
        :return boolean: Returns True if any intermediate instruction is a call to a constant target, False otherwise.
        """
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("call") and Instruction.is_constant(cur_instr.op1):
return True
return False
def is_equal(self, rhs):
"""
:return boolean: Returns True if the gadgets are an exact match, including offset. Used for gadget locality.
"""
return self.offset == rhs.offset and self.instruction_string == rhs.instruction_string
def is_duplicate(self, rhs):
"""
:return boolean: Returns True if the gadgets are a semantic match. Used for non-locality gadget metrics.
Semantic match is defined as the exact same sequence of equivalent instructions.
"""
if len(self.instructions) != len(rhs.instructions):
return False
for i in range(len(self.instructions)):
if not self.instructions[i].is_equivalent(rhs.instructions[i]):
return False
return True
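    # Example of the distinction (hypothetical offsets, assuming is_equivalent
    # matches identical instructions): two copies of "pop rdi ; ret" found at
    # 0x4011a3 and 0x5022b4 are duplicates (same instruction sequence) but not
    # equal, because their offsets differ.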
def is_JOP_COP_dispatcher(self):
"""
        :return boolean: Returns True if the gadget is a JOP or COP dispatcher. Defined as a gadget that begins with an
                         arithmetic operation on a register and ends with a branch to a dereference of that register.
                         Used to iterate through instructions in the payload. The only restriction on the arithmetic
                         operation is that it doesn't use the same register as both operands.
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
# Only consider gadgets that end in dereference of a register and start with opcodes of interest
if "[" in last_instr.op1 and \
first_instr.opcode in ["inc", "dec", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(last_instr.op1)
arith_target_1 = Instruction.get_operand_register_family(first_instr.op1)
# Secondary check: if the second op is a constant ensure it is in range [1, 32]
if Instruction.is_constant(first_instr.op2):
additive_value = Instruction.get_operand_as_constant(first_instr.op2)
if additive_value < 1 or additive_value > 32:
return False
arith_target_2 = Instruction.get_operand_register_family(first_instr.op2)
return gpi_target == arith_target_1 and arith_target_1 != arith_target_2
return False
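    # Example dispatcher (hypothetical gadget): "add rbx, 8 ; jmp qword ptr [rbx]"
    # qualifies: the arithmetic target and the dereferenced branch target share a
    # register family, and the constant 8 lies in the accepted range [1, 32].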
def is_JOP_COP_dataloader(self):
"""
        :return boolean: Returns True if the gadget is a JOP or COP data loader. Defined as a gadget that begins with a
                         pop opcode to a non-memory location that is also not the target of the GPI. Used to pop a
                         necessary value off the stack before redirecting to the dispatcher.
"""
first_instr = self.instructions[0]
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(self.instructions[len(self.instructions) - 1].op1)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target != pop_target
return False
def is_JOP_initializer(self):
"""
:return boolean: Returns True if the gadget is a JOP Initializer. Defined as a gadget that begins with a
"pop all" opcode, used to pop necessary values off stack en masse before redirecting to the
dispatcher.
"""
return self.instructions[0].opcode.startswith("popa")
def is_JOP_trampoline(self):
"""
:return boolean: Returns True if the gadget is a JOP trampoline. Defined as a gadget that begins with a
pop opcode to a non-memory location, and that ends in a dereference of that value. Used to
                         redirect execution to a value stored in memory.
"""
first_instr = self.instructions[0]
gpi_target_op = self.instructions[len(self.instructions) - 1].op1
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(gpi_target_op)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target == pop_target and "[" in gpi_target_op
return False
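    # Example trampoline (hypothetical gadget): "pop rax ; jmp qword ptr [rax]"
    # qualifies: the popped register and the dereferenced branch target belong to
    # the same family, so control flows through the popped pointer.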
def is_COP_initializer(self):
"""
:return boolean: Returns True if the gadget is a COP initializer. Defined as a gadget that begins with a
"pop all" opcode, does not use register bx/cx/dx/di as the call target, and does not clobber
bx/cx/dx or the call target in an intermediate instruction
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions)-1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
if first_instr.opcode.startswith("popa") and call_target not in [1, 2, 3, 5]: # BX, CX, DX, DI families
# Build collective list of register families to protect from being clobbered
protected_families = [1, 2, 3, call_target]
protected_registers = []
for family in protected_families:
for register in Instruction.register_families[family]:
protected_registers.append(register)
# Scan intermediate instructions to ensure they do not clobber a protected register
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Check for non-static modification of the register family
if cur_instr.op1 in protected_registers:
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return False
return True
return False
def is_COP_strong_trampoline(self):
"""
        :return boolean: Returns True if the gadget is a COP strong trampoline. Defined as a gadget that begins with a
                         pop opcode and contains at least one other pop operation. The last single-register pop (i.e.,
                         not a "pop all") must target the call target.
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
# Only consider instructions that start with a pop
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
cnt_pops = 1
last_pop_target = first_instr.op1
# Scan intermediate instructions for pops
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("popa"):
cnt_pops += 1
if cur_instr.opcode == "pop" and "[" not in cur_instr.op1:
cnt_pops += 1
last_pop_target = cur_instr.op1
# Check that at least two pops occurred and the last pop target is the call target
if cnt_pops > 1 and last_pop_target in Instruction.register_families[call_target]:
return True
return False
def is_COP_intrastack_pivot(self):
"""
:return boolean: Returns True if the gadget is a COP Intra-stack pivot gadget. Defined as a gadget that begins
with an additive operation on the stack pointer register. Used to move around in shellcode
during COP exploits. Only restriction on the arithmetic operation is that the second operand
is not a pointer.
"""
first_instr = self.instructions[0]
if first_instr.opcode in ["inc", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
arith_target = Instruction.get_operand_register_family(first_instr.op1)
if arith_target == 7: # RSP, ESP family number
if first_instr.op2 is None or "[" not in first_instr.op2:
return True
return False
def check_contains_leave(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate "leave" instruction.
"""
for i in range(1, len(self.instructions)-1):
if self.instructions[i].opcode == "leave":
self.score += 2.0
return # Only penalize gadget once
def check_sp_target_of_operation(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain
operations on the stack pointer register family.
"""
# Scan instructions to determine if they modify the stack pointer register family
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
            # Increase score by 4 for move, load address, and exchange ops, 3 for shift/rotate ops, 1 for pops, and 2 for others
if Instruction.get_operand_register_family(cur_instr.op1) == 7: # RSP, ESP family number
if "xchg" in cur_instr.opcode or "mov" in cur_instr.opcode or cur_instr.opcode in ["lea"]:
self.score += 4.0
elif cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
elif cur_instr.opcode == "pop":
self.score += 1.0
else:
self.score += 2.0 # Will be a static modification, otherwise it would have been rejected earlier
def check_negative_sp_offsets(self):
"""
        :return void: Increases gadget's score if its cumulative stack pointer offset is negative.
"""
sp_offset = 0
# Scan instructions to determine if they modify the stack pointer
for i in range(len(self.instructions)):
cur_instr = self.instructions[i]
if cur_instr.opcode == "push":
sp_offset -= 8
elif cur_instr.opcode == "pop" and cur_instr.op1 not in Instruction.register_families[7]:
sp_offset += 8
elif cur_instr.opcode in ["add", "adc"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset += Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode in ["sub", "sbb"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset -= Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode == "inc" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset += 1
elif cur_instr.opcode == "dec" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset -= 1
elif cur_instr.opcode.startswith("ret") and cur_instr.op1 is not None:
sp_offset += Instruction.get_operand_as_constant(cur_instr.op1)
if sp_offset < 0:
self.score += 2.0
def check_contains_conditional_op(self):
"""
:return void: Increases gadget's score if it contains conditional instructions like jumps, sets, and moves.
"""
        # Scan intermediate instructions for conditional jumps, moves, sets, and compare-exchanges
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("j") and cur_instr.opcode != "jmp":
self.score += 3.0
elif "cmov" in cur_instr.opcode or "cmpxchg" in cur_instr.opcode:
self.score += 2.0
elif "set" in cur_instr.opcode:
self.score += 1.0
def check_register_ops(self):
"""
        :return void: Increases gadget's score if it contains operations on a value-carrying or a bystander register.
"""
first_instr = self.instructions[0]
# Check if the first instruction creates a value or is an xchg operand (excluded as an edge case)
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
first_family = None
else:
# Check op1 to find the register family to protect
first_family = Instruction.get_operand_register_family(first_instr.op1)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# If the new value is a modification of the value-carrying register
if first_family is not None and first_family == Instruction.get_operand_register_family(cur_instr.op1):
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 1.5
else:
self.score += 1.0 # Will be a static modification, otherwise it would have been rejected earlier
elif "xchg" not in cur_instr.opcode and cur_instr.opcode != "pop":
# The modification is to a "bystander register". static mods +0.5, non-static +1.0
if cur_instr.op2 is not None and Instruction.get_operand_register_family(cur_instr.op2) is not None:
self.score += 1.0
else:
self.score += 0.5
def check_branch_target_of_operation(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain
operations on the indirect branch target register family.
"""
last_instr = self.instructions[len(self.instructions)-1]
target_family = Instruction.get_operand_register_family(last_instr.op1)
# Scan instructions to determine if they modify the target register family
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Increase score by 3 for shift/rotate ops, and 2 for others
if Instruction.get_operand_register_family(cur_instr.op1) == target_family:
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
else: # All other modifications to target register
self.score += 2.0
def check_memory_writes(self):
"""
:return void: Increases gadget's score if the gadget has an instruction that writes to memory.
"""
# Iterate through instructions except GPI
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
            # Have to check both operands for xchg instructions
if "xchg" in cur_instr.opcode and ("[" in cur_instr.op1 or "[" in cur_instr.op2):
self.score += 1.0
elif cur_instr.op1 is not None and "[" in cur_instr.op1:
self.score += 1.0
|
is_COP_intrastack_pivot
|
:return boolean: Returns True if the gadget is a COP Intra-stack pivot gadget. Defined as a gadget that begins
with an additive operation on the stack pointer register. Used to move around in shellcode
during COP exploits. Only restriction on the arithmetic operation is that the second operand
is not a pointer.
|
"""
Gadget class
"""
# Standard Library Imports
# Third Party Imports
# Local Imports
from static_analyzer.Instruction import Instruction
class Gadget(object):
"""
The Gadget class represents a single gadget.
"""
def __init__(self, raw_gadget):
"""
Gadget constructor
:param str raw_gadget: raw line output from ROPgadget
"""
# Parse the raw line
self.offset = raw_gadget[:raw_gadget.find(":")]
self.instruction_string = raw_gadget[raw_gadget.find(":") + 2:]
# Parse instruction objects
self.instructions = []
for instr in self.instruction_string.split(" ; "):
self.instructions.append(Instruction(instr))
# Initialize score
self.score = 0.0
def is_useless_op(self):
"""
:return boolean: Returns True if the first instruction opcode is in the "useless" list, False otherwise
Default behavior is to consider opcodes useful unless otherwise observed.
"""
first_opcode = self.instructions[0].opcode
# Bulk catch for all "jump" opcodes: No reason to include the instruction, just use the suffix directly
if first_opcode.startswith("j"):
return True
# Bulk catch for bounds checked jumps, same reason as above
if first_opcode.startswith("bnd"):
return True
# Bulk catch for all "ret" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("ret"):
return True
# Bulk catch for all "iret" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("iret"):
return True
# Bulk catch for all "call" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("call"):
return True
# Useless opcodes:
# NOP - No reason to include the instruction, just use the suffix directly
# LJMP - Same reason as "jump" opcodes above
useless = ["nop", "fnop", "ljmp"]
return first_opcode in useless
def contains_unusable_op(self):
"""
:return boolean: Returns True if any instruction opcode is unusable. False otherwise
                         Unusable instructions are Ring-0 opcodes that trap in user mode and some other exceptional ops.
"""
for instr in self.instructions:
# Bulk catch for all "invalidate" opcodes: Ring-0 instructions
if instr.opcode.startswith("inv"):
return True
# Bulk catch for all "Virtual-Machine" opcodes: Ring-0 instructions
if instr.opcode.startswith("vm") and instr.opcode != "vminsd" and instr.opcode != "vminpd":
return True
# Bulk catch for all "undefined" opcodes
if instr.opcode.startswith("ud"):
return True
# Other Ring-0 opcodes and RSM, LOCK prefix
unusable = ["clts", "hlt", "lgdt", "lidt", "lldt", "lmsw", "ltr", "monitor", "mwait",
"swapgs", "sysexit", "sysreturn", "wbinvd", "wrmsr", "xsetbv", "rsm", "lock"]
if instr.opcode in unusable:
return True
# Check for ring-0 operands (control, debug, and test registers)
if instr.op1 is not None:
if instr.op1.startswith("cr") or instr.op1.startswith("tr") or instr.op1.startswith("db"):
return True
if instr.op2 is not None:
if instr.op2.startswith("cr") or instr.op2.startswith("tr") or instr.op2.startswith("db"):
return True
return False
def is_gpi_only(self):
"""
:return boolean: Returns True if the gadget is a single instruction and starts with 'ret', 'jmp', or 'call',
False otherwise
"""
if len(self.instructions) == 1:
opcode = self.instructions[0].opcode
if opcode.startswith("ret") or opcode.startswith("jmp") or opcode.startswith("call"):
return True
return False
def is_invalid_branch(self):
"""
:return boolean: Returns True if the gadget is 'jmp' or 'call' ending and the call target is a constant offset
or does not target a recognized register family. False otherwise
"""
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("call") or last_instr.opcode.startswith("jmp"):
if Instruction.get_operand_register_family(last_instr.op1) is None:
return True
return False
def has_invalid_ret_offset(self):
"""
        :return boolean: Returns True if the gadget is 'ret' ending and contains a constant offset that is odd (not
                         2-byte aligned) or is greater than 32 bytes, False otherwise
"""
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("ret") and last_instr.op1 is not None:
offset = Instruction.get_operand_as_constant(last_instr.op1)
if (offset % 2 != 0) or (offset > 32):
return True
return False
def clobbers_created_value(self):
"""
:return boolean: Returns True if the gadget completely overwrites the value created in the first instruction,
False otherwise.
"""
first_instr = self.instructions[0]
# Check if the first instruction creates a value or is an xchg operand (excluded as an edge case)
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
return False
# Check op1 to find the register family to protect
first_family = Instruction.get_operand_register_family(first_instr.op1)
# Most likely means first operand is a constant, exclude from analysis
if first_family is None:
return False
# Iterate through intermediate instructions, determine if it overwrites protected value (or part of it)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value() or "xchg" in cur_instr.opcode:
continue
# Check for non-static modification of the register family
if first_family == Instruction.get_operand_register_family(cur_instr.op1):
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
def creates_unusable_value(self):
"""
        :return boolean: Returns True if the gadget creates a value in a segment or extension register, is
                         RIP-relative, or targets a constant memory location; False otherwise.
"""
        # Check if the first instruction creates a value (or may potentially set a flag)
first_instr = self.instructions[0]
if first_instr.opcode in ["cmp", "test", "push"] or first_instr.op1 is None:
return False
# Check if first operand is not a constant and it does not belong to a recognized register family
if not Instruction.is_constant(first_instr.op1) and \
Instruction.get_operand_register_family(first_instr.op1) is None:
return True
return False
def contains_intermediate_GPI(self):
"""
:return boolean: Returns True if the gadget's intermediate instructions contain a GPI (or a generic interrupt),
False otherwise.
"""
for i in range(len(self.instructions)-1):
cur_opcode = self.instructions[i].opcode
cur_target = self.instructions[i].op1
if cur_opcode.startswith("ret") or \
cur_opcode == "syscall" or cur_opcode == "sysenter" or cur_opcode.startswith("int") or \
("jmp" in cur_opcode and not Instruction.is_constant(cur_target)) or \
("call" in cur_opcode and not Instruction.is_constant(cur_target)):
return True
return False
def clobbers_stack_pointer(self):
"""
:return boolean: Returns True if the ROP gadget's instructions assign a non-static value to the stack pointer
register, False otherwise.
"""
# Only check ROP gadgets
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("ret"):
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Check for non-static modification of the stack pointer register family
if Instruction.get_operand_register_family(cur_instr.op1) == 7: # RSP, ESP family number
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "pop"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
def clobbers_indirect_target(self):
"""
:return boolean: Returns True if the JOP/COP gadget's instructions modify the indirect branch register in
certain ways, False otherwise.
"""
# Get the register family of the indirect jump / call
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
family = Instruction.get_operand_register_family(last_instr.op1)
# Check each instruction to see if it clobbers the value
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# First check if the instruction modifies the target
if cur_instr.op1 in Instruction.register_families[family]:
# Does the instruction zeroize out the target?
if cur_instr.opcode == "xor" and cur_instr.op1 == cur_instr.op2:
return True
# Does the instruction perform a RIP-relative LEA into the target?
if cur_instr.opcode == "lea" and ("rip" in cur_instr.op2 or "eip" in cur_instr.op2):
return True
# Does the instruction load a string or a value of an input port into the target?
if cur_instr.opcode.startswith("lods") or cur_instr.opcode == "in":
return True
# Does the instruction overwrite the target with a static value or segment register value?
if "mov" in cur_instr.opcode and (Instruction.is_constant(cur_instr.op2) or
Instruction.get_operand_register_family(cur_instr.op2) is None):
return True
return False
def has_invalid_int_handler(self):
"""
        :return boolean: Returns True if the gadget ends in a software interrupt whose operand is not 0x80 (the
                         Linux system call handler), False otherwise.
"""
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("int") and last_instr.op1 != "0x80":
return True
return False
def is_rip_relative_indirect_branch(self):
"""
:return boolean: Returns True if the gadget is a JOP/COP gadget relying on a RIP relative indirect branch,
False otherwise.
"""
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
if "rip" in last_instr.op1 or "eip" in last_instr.op1:
return True
return False
    def contains_static_call(self):
        """
        :return boolean: Returns True if any intermediate instruction is a call to a constant target, False otherwise.
        """
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("call") and Instruction.is_constant(cur_instr.op1):
return True
return False
def is_equal(self, rhs):
"""
:return boolean: Returns True if the gadgets are an exact match, including offset. Used for gadget locality.
"""
return self.offset == rhs.offset and self.instruction_string == rhs.instruction_string
def is_duplicate(self, rhs):
"""
:return boolean: Returns True if the gadgets are a semantic match. Used for non-locality gadget metrics.
Semantic match is defined as the exact same sequence of equivalent instructions.
"""
if len(self.instructions) != len(rhs.instructions):
return False
for i in range(len(self.instructions)):
if not self.instructions[i].is_equivalent(rhs.instructions[i]):
return False
return True
def is_JOP_COP_dispatcher(self):
"""
        :return boolean: Returns True if the gadget is a JOP or COP dispatcher. Defined as a gadget that begins with an
                         arithmetic operation on a register and ends with a branch to a dereference of that register.
                         Used to iterate through instructions in the payload. The only restriction on the arithmetic
                         operation is that it doesn't use the same register as both operands.
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
# Only consider gadgets that end in dereference of a register and start with opcodes of interest
if "[" in last_instr.op1 and \
first_instr.opcode in ["inc", "dec", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(last_instr.op1)
arith_target_1 = Instruction.get_operand_register_family(first_instr.op1)
# Secondary check: if the second op is a constant ensure it is in range [1, 32]
if Instruction.is_constant(first_instr.op2):
additive_value = Instruction.get_operand_as_constant(first_instr.op2)
if additive_value < 1 or additive_value > 32:
return False
arith_target_2 = Instruction.get_operand_register_family(first_instr.op2)
return gpi_target == arith_target_1 and arith_target_1 != arith_target_2
return False
def is_JOP_COP_dataloader(self):
"""
        :return boolean: Returns True if the gadget is a JOP or COP data loader. Defined as a gadget that begins with a
                         pop opcode to a non-memory location that is also not the target of the GPI. Used to pop a
                         necessary value off the stack before redirecting to the dispatcher.
"""
first_instr = self.instructions[0]
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(self.instructions[len(self.instructions) - 1].op1)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target != pop_target
return False
def is_JOP_initializer(self):
"""
:return boolean: Returns True if the gadget is a JOP Initializer. Defined as a gadget that begins with a
"pop all" opcode, used to pop necessary values off stack en masse before redirecting to the
dispatcher.
"""
return self.instructions[0].opcode.startswith("popa")
def is_JOP_trampoline(self):
"""
:return boolean: Returns True if the gadget is a JOP trampoline. Defined as a gadget that begins with a
pop opcode to a non-memory location, and that ends in a dereference of that value. Used to
                         redirect execution to a value stored in memory.
"""
first_instr = self.instructions[0]
gpi_target_op = self.instructions[len(self.instructions) - 1].op1
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(gpi_target_op)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target == pop_target and "[" in gpi_target_op
return False
def is_COP_initializer(self):
"""
:return boolean: Returns True if the gadget is a COP initializer. Defined as a gadget that begins with a
"pop all" opcode, does not use register bx/cx/dx/di as the call target, and does not clobber
bx/cx/dx or the call target in an intermediate instruction
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions)-1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
if first_instr.opcode.startswith("popa") and call_target not in [1, 2, 3, 5]: # BX, CX, DX, DI families
# Build collective list of register families to protect from being clobbered
protected_families = [1, 2, 3, call_target]
protected_registers = []
for family in protected_families:
for register in Instruction.register_families[family]:
protected_registers.append(register)
# Scan intermediate instructions to ensure they do not clobber a protected register
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Check for non-static modification of the register family
if cur_instr.op1 in protected_registers:
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return False
return True
return False
def is_COP_strong_trampoline(self):
"""
        :return boolean: Returns True if the gadget is a COP strong trampoline. Defined as a gadget that begins with a
                         pop opcode and contains at least one other pop operation. The last single-register pop (i.e.,
                         not a "pop all") must target the call target.
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
# Only consider instructions that start with a pop
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
cnt_pops = 1
last_pop_target = first_instr.op1
# Scan intermediate instructions for pops
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("popa"):
cnt_pops += 1
if cur_instr.opcode == "pop" and "[" not in cur_instr.op1:
cnt_pops += 1
last_pop_target = cur_instr.op1
# Check that at least two pops occurred and the last pop target is the call target
if cnt_pops > 1 and last_pop_target in Instruction.register_families[call_target]:
return True
return False
# MASKED: is_COP_intrastack_pivot function (lines 439-454)
def check_contains_leave(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate "leave" instruction.
"""
for i in range(1, len(self.instructions)-1):
if self.instructions[i].opcode == "leave":
self.score += 2.0
return # Only penalize gadget once
def check_sp_target_of_operation(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain
operations on the stack pointer register family.
"""
# Scan instructions to determine if they modify the stack pointer register family
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
            # Increase score by 4 for move, load address, and exchange ops, 3 for shift/rotate ops, 1 for pops, and 2 for others
if Instruction.get_operand_register_family(cur_instr.op1) == 7: # RSP, ESP family number
if "xchg" in cur_instr.opcode or "mov" in cur_instr.opcode or cur_instr.opcode in ["lea"]:
self.score += 4.0
elif cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
elif cur_instr.opcode == "pop":
self.score += 1.0
else:
self.score += 2.0 # Will be a static modification, otherwise it would have been rejected earlier
def check_negative_sp_offsets(self):
"""
        :return void: Increases gadget's score if its cumulative stack pointer offset is negative.
"""
sp_offset = 0
# Scan instructions to determine if they modify the stack pointer
for i in range(len(self.instructions)):
cur_instr = self.instructions[i]
if cur_instr.opcode == "push":
sp_offset -= 8
elif cur_instr.opcode == "pop" and cur_instr.op1 not in Instruction.register_families[7]:
sp_offset += 8
elif cur_instr.opcode in ["add", "adc"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset += Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode in ["sub", "sbb"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset -= Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode == "inc" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset += 1
elif cur_instr.opcode == "dec" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset -= 1
elif cur_instr.opcode.startswith("ret") and cur_instr.op1 is not None:
sp_offset += Instruction.get_operand_as_constant(cur_instr.op1)
if sp_offset < 0:
self.score += 2.0
def check_contains_conditional_op(self):
"""
:return void: Increases gadget's score if it contains conditional instructions like jumps, sets, and moves.
"""
        # Scan intermediate instructions for conditional jumps, moves, sets, and compare-exchanges
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("j") and cur_instr.opcode != "jmp":
self.score += 3.0
elif "cmov" in cur_instr.opcode or "cmpxchg" in cur_instr.opcode:
self.score += 2.0
elif "set" in cur_instr.opcode:
self.score += 1.0
def check_register_ops(self):
"""
        :return void: Increases gadget's score if it contains operations on a value-carrying or a bystander register.
"""
first_instr = self.instructions[0]
# Check if the first instruction creates a value or is an xchg operand (excluded as an edge case)
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
first_family = None
else:
# Check op1 to find the register family to protect
first_family = Instruction.get_operand_register_family(first_instr.op1)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# If the new value is a modification of the value-carrying register
if first_family is not None and first_family == Instruction.get_operand_register_family(cur_instr.op1):
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 1.5
else:
self.score += 1.0 # Will be a static modification, otherwise it would have been rejected earlier
elif "xchg" not in cur_instr.opcode and cur_instr.opcode != "pop":
# The modification is to a "bystander register". static mods +0.5, non-static +1.0
if cur_instr.op2 is not None and Instruction.get_operand_register_family(cur_instr.op2) is not None:
self.score += 1.0
else:
self.score += 0.5
def check_branch_target_of_operation(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain
operations on the indirect branch target register family.
"""
last_instr = self.instructions[len(self.instructions)-1]
target_family = Instruction.get_operand_register_family(last_instr.op1)
# Scan instructions to determine if they modify the target register family
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Increase score by 3 for shift/rotate ops, and 2 for others
if Instruction.get_operand_register_family(cur_instr.op1) == target_family:
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
else: # All other modifications to target register
self.score += 2.0
def check_memory_writes(self):
"""
:return void: Increases gadget's score if the gadget has an instruction that writes to memory.
"""
# Iterate through instructions except GPI
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
            # Have to check both operands for xchg instructions
if "xchg" in cur_instr.opcode and ("[" in cur_instr.op1 or "[" in cur_instr.op2):
self.score += 1.0
elif cur_instr.op1 is not None and "[" in cur_instr.op1:
self.score += 1.0
|
def is_COP_intrastack_pivot(self):
"""
:return boolean: Returns True if the gadget is a COP Intra-stack pivot gadget. Defined as a gadget that begins
with an additive operation on the stack pointer register. Used to move around in shellcode
during COP exploits. Only restriction on the arithmetic operation is that the second operand
is not a pointer.
"""
first_instr = self.instructions[0]
if first_instr.opcode in ["inc", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
arith_target = Instruction.get_operand_register_family(first_instr.op1)
if arith_target == 7: # RSP, ESP family number
if first_instr.op2 is None or "[" not in first_instr.op2:
return True
return False
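    # Example pivot (hypothetical gadget): "add rsp, 0x18 ; call rax" qualifies:
    # the first instruction additively adjusts the stack pointer family and its
    # second operand is a constant rather than a memory dereference.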
| 439
| 454
|
"""
Gadget class
"""
# Standard Library Imports
# Third Party Imports
# Local Imports
from static_analyzer.Instruction import Instruction
class Gadget(object):
"""
The Gadget class represents a single gadget.
"""
def __init__(self, raw_gadget):
"""
Gadget constructor
:param str raw_gadget: raw line output from ROPgadget
"""
# Parse the raw line
self.offset = raw_gadget[:raw_gadget.find(":")]
self.instruction_string = raw_gadget[raw_gadget.find(":") + 2:]
# Parse instruction objects
self.instructions = []
for instr in self.instruction_string.split(" ; "):
self.instructions.append(Instruction(instr))
# Initialize score
self.score = 0.0
def is_useless_op(self):
"""
:return boolean: Returns True if the first instruction opcode is in the "useless" list, False otherwise
Default behavior is to consider opcodes useful unless otherwise observed.
"""
first_opcode = self.instructions[0].opcode
# Bulk catch for all "jump" opcodes: No reason to include the instruction, just use the suffix directly
if first_opcode.startswith("j"):
return True
# Bulk catch for bounds checked jumps, same reason as above
if first_opcode.startswith("bnd"):
return True
# Bulk catch for all "ret" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("ret"):
return True
# Bulk catch for all "iret" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("iret"):
return True
# Bulk catch for all "call" opcodes: Bug in ROP gadget finds some gadgets that start with this GPI
if first_opcode.startswith("call"):
return True
# Useless opcodes:
# NOP - No reason to include the instruction, just use the suffix directly
# LJMP - Same reason as "jump" opcodes above
useless = ["nop", "fnop", "ljmp"]
return first_opcode in useless
def contains_unusable_op(self):
"""
:return boolean: Returns True if any instruction opcode is unusable. False otherwise
                         Unusable instructions are Ring-0 opcodes that trap in user mode and some other exceptional ops.
"""
for instr in self.instructions:
# Bulk catch for all "invalidate" opcodes: Ring-0 instructions
if instr.opcode.startswith("inv"):
return True
# Bulk catch for all "Virtual-Machine" opcodes: Ring-0 instructions
if instr.opcode.startswith("vm") and instr.opcode != "vminsd" and instr.opcode != "vminpd":
return True
# Bulk catch for all "undefined" opcodes
if instr.opcode.startswith("ud"):
return True
# Other Ring-0 opcodes and RSM, LOCK prefix
unusable = ["clts", "hlt", "lgdt", "lidt", "lldt", "lmsw", "ltr", "monitor", "mwait",
"swapgs", "sysexit", "sysreturn", "wbinvd", "wrmsr", "xsetbv", "rsm", "lock"]
if instr.opcode in unusable:
return True
# Check for ring-0 operands (control, debug, and test registers)
if instr.op1 is not None:
if instr.op1.startswith("cr") or instr.op1.startswith("tr") or instr.op1.startswith("db"):
return True
if instr.op2 is not None:
if instr.op2.startswith("cr") or instr.op2.startswith("tr") or instr.op2.startswith("db"):
return True
return False
def is_gpi_only(self):
"""
:return boolean: Returns True if the gadget is a single instruction and starts with 'ret', 'jmp', or 'call',
False otherwise
"""
if len(self.instructions) == 1:
opcode = self.instructions[0].opcode
if opcode.startswith("ret") or opcode.startswith("jmp") or opcode.startswith("call"):
return True
return False
def is_invalid_branch(self):
"""
:return boolean: Returns True if the gadget is 'jmp' or 'call' ending and the call target is a constant offset
or does not target a recognized register family. False otherwise
"""
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("call") or last_instr.opcode.startswith("jmp"):
if Instruction.get_operand_register_family(last_instr.op1) is None:
return True
return False
def has_invalid_ret_offset(self):
"""
        :return boolean: Returns True if the gadget is 'ret' ending and contains a constant offset that is odd (not
                         2-byte aligned) or is greater than 32 bytes, False otherwise
"""
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("ret") and last_instr.op1 is not None:
offset = Instruction.get_operand_as_constant(last_instr.op1)
if (offset % 2 != 0) or (offset > 32):
return True
return False
def clobbers_created_value(self):
"""
:return boolean: Returns True if the gadget completely overwrites the value created in the first instruction,
False otherwise.
"""
first_instr = self.instructions[0]
# Check if the first instruction creates a value or is an xchg operand (excluded as an edge case)
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
return False
# Check op1 to find the register family to protect
first_family = Instruction.get_operand_register_family(first_instr.op1)
# Most likely means first operand is a constant, exclude from analysis
if first_family is None:
return False
# Iterate through intermediate instructions, determine if it overwrites protected value (or part of it)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value() or "xchg" in cur_instr.opcode:
continue
# Check for non-static modification of the register family
if first_family == Instruction.get_operand_register_family(cur_instr.op1):
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
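# Illustrative traces for the rule above (hedged; Instruction construction is
# assumed from context, so these are shown as comments rather than run):
#   "pop rax ; mov rax, rbx ; ret"  -> True  (rax is overwritten non-statically)
#   "pop rax ; add rax, 8 ; ret"    -> False (static modification of rax)
#   "pop rax ; xchg rax, rbx ; ret" -> False (xchg is excluded as an edge case)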
def creates_unusable_value(self):
"""
:return boolean: Returns True if the gadget creates a value in a segment or extension register, is
RIP-relative, or targets a constant memory location; False otherwise.
"""
# Check if the first instruction creates a value (or may potentially set a flag)
first_instr = self.instructions[0]
if first_instr.opcode in ["cmp", "test", "push"] or first_instr.op1 is None:
return False
# Check if first operand is not a constant and it does not belong to a recognized register family
if not Instruction.is_constant(first_instr.op1) and \
Instruction.get_operand_register_family(first_instr.op1) is None:
return True
return False
def contains_intermediate_GPI(self):
"""
:return boolean: Returns True if the gadget's intermediate instructions contain a GPI (or a generic interrupt),
False otherwise.
"""
for i in range(len(self.instructions)-1):
cur_opcode = self.instructions[i].opcode
cur_target = self.instructions[i].op1
if cur_opcode.startswith("ret") or \
cur_opcode == "syscall" or cur_opcode == "sysenter" or cur_opcode.startswith("int") or \
("jmp" in cur_opcode and not Instruction.is_constant(cur_target)) or \
("call" in cur_opcode and not Instruction.is_constant(cur_target)):
return True
return False
def clobbers_stack_pointer(self):
"""
:return boolean: Returns True if the ROP gadget's instructions assign a non-static value to the stack pointer
register, False otherwise.
"""
# Only check ROP gadgets
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("ret"):
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Check for non-static modification of the stack pointer register family
if Instruction.get_operand_register_family(cur_instr.op1) == 7: # RSP, ESP family number
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "pop"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return True
return False
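# Illustrative traces for the rule above (comments only; object construction
# is assumed from context):
#   "mov rsp, rax ; ret"  -> True  (non-static write to the stack pointer)
#   "add rsp, 0x10 ; ret" -> False (static adjustment, op2 is a constant)
#   "pop rsp ; ret"       -> False ("pop" is explicitly exempted above)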
def clobbers_indirect_target(self):
"""
:return boolean: Returns True if the JOP/COP gadget's instructions modify the indirect branch register in
certain ways, False otherwise.
"""
# Get the register family of the indirect jump / call
last_instr = self.instructions[len(self.instructions)-1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
family = Instruction.get_operand_register_family(last_instr.op1)
# Check each instruction to see if it clobbers the value
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# First check if the instruction modifies the target
if cur_instr.op1 in Instruction.register_families[family]:
# Does the instruction zero out the target?
if cur_instr.opcode == "xor" and cur_instr.op1 == cur_instr.op2:
return True
# Does the instruction perform a RIP-relative LEA into the target?
if cur_instr.opcode == "lea" and ("rip" in cur_instr.op2 or "eip" in cur_instr.op2):
return True
# Does the instruction load a string or a value from an input port into the target?
if cur_instr.opcode.startswith("lods") or cur_instr.opcode == "in":
return True
# Does the instruction overwrite the target with a static value or segment register value?
if "mov" in cur_instr.opcode and (Instruction.is_constant(cur_instr.op2) or
Instruction.get_operand_register_family(cur_instr.op2) is None):
return True
return False
def has_invalid_int_handler(self):
"""
:return boolean: Returns True if the gadget ends in a software interrupt whose handler is not the Linux
syscall handler (int 0x80), False otherwise.
"""
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("int") and last_instr.op1 != "0x80":
return True
return False
def is_rip_relative_indirect_branch(self):
"""
:return boolean: Returns True if the gadget is a JOP/COP gadget relying on a RIP relative indirect branch,
False otherwise.
"""
last_instr = self.instructions[len(self.instructions) - 1]
if last_instr.opcode.startswith("jmp") or last_instr.opcode.startswith("call"):
if "rip" in last_instr.op1 or "eip" in last_instr.op1:
return True
return False
def contains_static_call(self):
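"""
:return boolean: Returns True if any intermediate instruction is a call to a constant target, False otherwise.
"""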
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("call") and Instruction.is_constant(cur_instr.op1):
return True
return False
def is_equal(self, rhs):
"""
:return boolean: Returns True if the gadgets are an exact match, including offset. Used for gadget locality.
"""
return self.offset == rhs.offset and self.instruction_string == rhs.instruction_string
def is_duplicate(self, rhs):
"""
:return boolean: Returns True if the gadgets are a semantic match. Used for non-locality gadget metrics.
Semantic match is defined as the exact same sequence of equivalent instructions.
"""
if len(self.instructions) != len(rhs.instructions):
return False
for i in range(len(self.instructions)):
if not self.instructions[i].is_equivalent(rhs.instructions[i]):
return False
return True
def is_JOP_COP_dispatcher(self):
"""
:return boolean: Returns True if the gadget is a JOP or COP dispatcher. Defined as a gadget that begins with an
arithmetic operation on a register and ends with a branch to a dereference of that register.
Used to iterate through instructions in the payload. The only restriction on the arithmetic
operation is that it does not use the same register as both operands.
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
# Only consider gadgets that end in dereference of a register and start with opcodes of interest
if "[" in last_instr.op1 and \
first_instr.opcode in ["inc", "dec", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(last_instr.op1)
arith_target_1 = Instruction.get_operand_register_family(first_instr.op1)
# Secondary check: if the second op is a constant ensure it is in range [1, 32]
if Instruction.is_constant(first_instr.op2):
additive_value = Instruction.get_operand_as_constant(first_instr.op2)
if additive_value < 1 or additive_value > 32:
return False
arith_target_2 = Instruction.get_operand_register_family(first_instr.op2)
return gpi_target == arith_target_1 and arith_target_1 != arith_target_2
return False
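# Illustrative traces for the dispatcher rule above (comments only):
#   "add rax, 8 ; jmp [rax]"   -> True  (arithmetic target matches the GPI target)
#   "add rax, rax ; jmp [rax]" -> False (same register used as both operands)
#   "add rbx, 8 ; jmp [rax]"   -> False (arithmetic target differs from GPI target)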
def is_JOP_COP_dataloader(self):
"""
:return boolean: Returns True if the gadget is a JOP or COP data loader. Defined as a gadget that begins with a
pop opcode into a non-memory location that is also not the target of the GPI. Used to pop
necessary values off the stack en masse before redirecting to the dispatcher.
"""
first_instr = self.instructions[0]
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(self.instructions[len(self.instructions) - 1].op1)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target != pop_target
return False
def is_JOP_initializer(self):
"""
:return boolean: Returns True if the gadget is a JOP initializer. Defined as a gadget that begins with a
"pop all" opcode, used to pop necessary values off the stack en masse before redirecting to
the dispatcher.
"""
return self.instructions[0].opcode.startswith("popa")
def is_JOP_trampoline(self):
"""
:return boolean: Returns True if the gadget is a JOP trampoline. Defined as a gadget that begins with a
pop opcode into a non-memory location and ends in a dereference of that value. Used to
redirect execution to a value stored in memory.
"""
first_instr = self.instructions[0]
gpi_target_op = self.instructions[len(self.instructions) - 1].op1
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
gpi_target = Instruction.get_operand_register_family(gpi_target_op)
pop_target = Instruction.get_operand_register_family(first_instr.op1)
return gpi_target == pop_target and "[" in gpi_target_op
return False
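# Illustrative traces for the trampoline rule above (comments only):
#   "pop rax ; jmp [rax]" -> True  (pop target is dereferenced by the GPI)
#   "pop rax ; jmp rax"   -> False (GPI target is not a memory dereference)
#   "pop rax ; jmp [rbx]" -> False (pop target differs from the GPI target)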
def is_COP_initializer(self):
"""
:return boolean: Returns True if the gadget is a COP initializer. Defined as a gadget that begins with a
"pop all" opcode, does not use register bx/cx/dx/di as the call target, and does not clobber
bx/cx/dx or the call target in an intermediate instruction
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions)-1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
if first_instr.opcode.startswith("popa") and call_target not in [1, 2, 3, 5]: # BX, CX, DX, DI families
# Build collective list of register families to protect from being clobbered
protected_families = [1, 2, 3, call_target]
protected_registers = []
for family in protected_families:
for register in Instruction.register_families[family]:
protected_registers.append(register)
# Scan intermediate instructions to ensure they do not clobber a protected register
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Check for non-static modification of the register family
if cur_instr.op1 in protected_registers:
if (cur_instr.op2 is None and cur_instr.opcode not in ["inc", "dec", "neg", "not"]) or \
(cur_instr.op2 is not None and not Instruction.is_constant(cur_instr.op2)):
return False
return True
return False
def is_COP_strong_trampoline(self):
"""
:return boolean: Returns True if the gadget is a COP strong trampoline. Defined as a gadget that begins with a
pop opcode and contains at least one other pop operation. The last non-popa pop operation
must target the call target.
"""
first_instr = self.instructions[0]
last_instr = self.instructions[len(self.instructions) - 1]
call_target = Instruction.get_operand_register_family(last_instr.op1)
# Only consider instructions that start with a pop
if first_instr.opcode == "pop" and "[" not in first_instr.op1:
cnt_pops = 1
last_pop_target = first_instr.op1
# Scan intermediate instructions for pops
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("popa"):
cnt_pops += 1
if cur_instr.opcode == "pop" and "[" not in cur_instr.op1:
cnt_pops += 1
last_pop_target = cur_instr.op1
# Check that at least two pops occurred and the last pop target is the call target
if cnt_pops > 1 and last_pop_target in Instruction.register_families[call_target]:
return True
return False
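# Illustrative traces for the strong-trampoline rule above (comments only):
#   "pop rax ; pop rbx ; call rbx" -> True  (two pops, last pop feeds the call)
#   "pop rax ; call rbx"           -> False (only one pop)
#   "pop rbx ; pop rax ; call rbx" -> False (last pop does not feed the call)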
def is_COP_intrastack_pivot(self):
"""
:return boolean: Returns True if the gadget is a COP intra-stack pivot gadget. Defined as a gadget that begins
with an arithmetic operation on the stack pointer register. Used to move around in shellcode
during COP exploits. The only restriction on the arithmetic operation is that the second
operand is not a memory reference.
"""
first_instr = self.instructions[0]
if first_instr.opcode in ["inc", "add", "adc", "sub", "sbb"] and "[" not in first_instr.op1:
arith_target = Instruction.get_operand_register_family(first_instr.op1)
if arith_target == 7: # RSP, ESP family number
if first_instr.op2 is None or "[" not in first_instr.op2:
return True
return False
def check_contains_leave(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate "leave" instruction.
"""
for i in range(1, len(self.instructions)-1):
if self.instructions[i].opcode == "leave":
self.score += 2.0
return # Only penalize gadget once
def check_sp_target_of_operation(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain
operations on the stack pointer register family.
"""
# Scan instructions to determine if they modify the stack pointer register family
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Increase score by 4 for move, load address, and exchange ops, 3 for shift/rotate ops, 1 for pops, and 2 for others
if Instruction.get_operand_register_family(cur_instr.op1) == 7: # RSP, ESP family number
if "xchg" in cur_instr.opcode or "mov" in cur_instr.opcode or cur_instr.opcode in ["lea"]:
self.score += 4.0
elif cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
elif cur_instr.opcode == "pop":
self.score += 1.0
else:
self.score += 2.0 # Will be a static modification, otherwise it would have been rejected earlier
def check_negative_sp_offsets(self):
"""
:return void: Increases gadget's score if its cumulative stack pointer offset is negative.
"""
sp_offset = 0
# Scan instructions to determine if they modify the stack pointer
for i in range(len(self.instructions)):
cur_instr = self.instructions[i]
if cur_instr.opcode == "push":
sp_offset -= 8
elif cur_instr.opcode == "pop" and cur_instr.op1 not in Instruction.register_families[7]:
sp_offset += 8
elif cur_instr.opcode in ["add", "adc"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset += Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode in ["sub", "sbb"] and cur_instr.op1 in Instruction.register_families[7] and \
Instruction.is_constant(cur_instr.op2):
sp_offset -= Instruction.get_operand_as_constant(cur_instr.op2)
elif cur_instr.opcode == "inc" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset += 1
elif cur_instr.opcode == "dec" and cur_instr.op1 in Instruction.register_families[7]:
sp_offset -= 1
elif cur_instr.opcode.startswith("ret") and cur_instr.op1 is not None:
sp_offset += Instruction.get_operand_as_constant(cur_instr.op1)
if sp_offset < 0:
self.score += 2.0
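# Minimal self-contained sketch of the same stack-pointer bookkeeping on a
# hardcoded (opcode, op1, op2) trace; the helper name and tuple layout are
# illustrative only, not part of the original module:
def _demo_sp_offset(trace):
    sp = 0
    for opcode, op1, op2 in trace:
        if opcode == "push":
            sp -= 8
        elif opcode == "pop" and op1 != "rsp":
            sp += 8
        elif opcode in ("add", "adc") and op1 == "rsp":
            sp += int(op2, 16)
        elif opcode in ("sub", "sbb") and op1 == "rsp":
            sp -= int(op2, 16)
    return sp

# "push rax ; pop rbx ; sub rsp, 0x10 ; ret" nets -16, so the gadget would
# receive the +2.0 negative-offset penalty above.
assert _demo_sp_offset([("push", "rax", None), ("pop", "rbx", None),
                        ("sub", "rsp", "0x10"), ("ret", None, None)]) == -16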
def check_contains_conditional_op(self):
"""
:return void: Increases gadget's score if it contains conditional instructions like jumps, sets, and moves.
"""
# Scan instructions to determine if they contain conditional operations
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
if cur_instr.opcode.startswith("j") and cur_instr.opcode != "jmp":
self.score += 3.0
elif "cmov" in cur_instr.opcode or "cmpxchg" in cur_instr.opcode:
self.score += 2.0
elif "set" in cur_instr.opcode:
self.score += 1.0
def check_register_ops(self):
"""
:return void: Increases gadget's score if it contains operations on a value-carrying register or a bystander register.
"""
first_instr = self.instructions[0]
# Check if the first instruction creates a value or is an xchg operation (excluded as an edge case)
if not first_instr.creates_value() or "xchg" in first_instr.opcode:
first_family = None
else:
# Check op1 to find the register family to protect
first_family = Instruction.get_operand_register_family(first_instr.op1)
for i in range(1, len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# If the new value is a modification of the value-carrying register
if first_family is not None and first_family == Instruction.get_operand_register_family(cur_instr.op1):
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 1.5
else:
self.score += 1.0 # Will be a static modification, otherwise it would have been rejected earlier
elif "xchg" not in cur_instr.opcode and cur_instr.opcode != "pop":
# The modification is to a "bystander register". static mods +0.5, non-static +1.0
if cur_instr.op2 is not None and Instruction.get_operand_register_family(cur_instr.op2) is not None:
self.score += 1.0
else:
self.score += 0.5
def check_branch_target_of_operation(self):
"""
:return void: Increases gadget's score if the gadget has an intermediate instruction that performs certain
operations on the indirect branch target register family.
"""
last_instr = self.instructions[len(self.instructions)-1]
target_family = Instruction.get_operand_register_family(last_instr.op1)
# Scan instructions to determine if they modify the target register family
for i in range(len(self.instructions) - 1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Increase score by 3 for shift/rotate ops, and 2 for others
if Instruction.get_operand_register_family(cur_instr.op1) == target_family:
if cur_instr.opcode in ["shl", "shr", "sar", "sal", "ror", "rol", "rcr", "rcl"]:
self.score += 3.0
else: # All other modifications to target register
self.score += 2.0
def check_memory_writes(self):
"""
:return void: Increases gadget's score if the gadget has an instruction that writes to memory.
"""
# Iterate through instructions except GPI
for i in range(len(self.instructions)-1):
cur_instr = self.instructions[i]
# Ignore instructions that do not create values
if not cur_instr.creates_value():
continue
# Have to check both operands for xchg instructions
if "xchg" in cur_instr.opcode and ("[" in cur_instr.op1 or "[" in cur_instr.op2):
self.score += 1.0
elif cur_instr.op1 is not None and "[" in cur_instr.op1:
self.score += 1.0
|
equal_up_to_global_phase
|
Determine whether two objects are equal up to global phase.
If `val` implements a `_equal_up_to_global_phase_` method then it is
invoked and takes precedence over all other checks:
- For complex primitive types, the magnitudes of the values are compared.
- For `val` and `other` both iterable of the same length, consecutive
elements are compared recursively. The types of `val` and `other` do not
need to match; they just need to be iterable and have the same structure.
- For all other types, fall back to `_approx_eq_`
Args:
val: Source object for approximate comparison.
other: Target object for approximate comparison.
atol: The minimum absolute tolerance. This places an upper bound on
the differences in *magnitudes* of two compared complex numbers.
Returns:
True if objects are approximately equal up to phase, False otherwise.
|
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numbers
from collections.abc import Iterable
from typing import Any, Union
import numpy as np
from typing_extensions import Protocol
from cirq import linalg
from cirq._doc import doc_private
from cirq.protocols.approximate_equality_protocol import approx_eq
class SupportsEqualUpToGlobalPhase(Protocol):
"""Object which can be compared for equality mod global phase."""
@doc_private
def _equal_up_to_global_phase_(self, other: Any, *, atol: Union[int, float]) -> bool:
"""Approximate comparator.
Types implementing this protocol define their own logic for comparison
with other types.
Args:
other: Target object for comparison of equality up to global phase.
atol: The minimum absolute tolerance. See `np.isclose()`
documentation for details.
Returns:
True if objects are equal up to a global phase, False otherwise.
Returns NotImplemented when checking equality up to a global phase
is not implemented for given types.
"""
# MASKED: equal_up_to_global_phase function (lines 49-100)
|
def equal_up_to_global_phase(val: Any, other: Any, *, atol: Union[int, float] = 1e-8) -> bool:
"""Determine whether two objects are equal up to global phase.
If `val` implements a `_equal_up_to_global_phase_` method then it is
invoked and takes precedence over all other checks:
- For complex primitive types, the magnitudes of the values are compared.
- For `val` and `other` both iterable of the same length, consecutive
elements are compared recursively. The types of `val` and `other` do not
need to match; they just need to be iterable and have the same structure.
- For all other types, fall back to `_approx_eq_`
Args:
val: Source object for approximate comparison.
other: Target object for approximate comparison.
atol: The minimum absolute tolerance. This places an upper bound on
the differences in *magnitudes* of two compared complex numbers.
Returns:
True if objects are approximately equal up to phase, False otherwise.
"""
# Attempt _equal_up_to_global_phase_ for val.
eq_up_to_phase_getter = getattr(val, '_equal_up_to_global_phase_', None)
if eq_up_to_phase_getter is not None:
result = eq_up_to_phase_getter(other, atol=atol)
if result is not NotImplemented:
return result
# Fall back to _equal_up_to_global_phase_ for other.
other_eq_up_to_phase_getter = getattr(other, '_equal_up_to_global_phase_', None)
if other_eq_up_to_phase_getter is not None:
result = other_eq_up_to_phase_getter(val, atol=atol)
if result is not NotImplemented:
return result
# Fall back to special check for numeric arrays.
# Defer to numpy automatic type casting to determine numeric type.
if isinstance(val, Iterable) and isinstance(other, Iterable):
a = np.asarray(val)
b = np.asarray(other)
if a.dtype.kind in 'uifc' and b.dtype.kind in 'uifc':
return linalg.allclose_up_to_global_phase(a, b, atol=atol)
# Fall back to approx_eq to compare the magnitudes of two numbers.
if isinstance(val, numbers.Number) and isinstance(other, numbers.Number):
result = approx_eq(abs(val), abs(other), atol=atol) # type: ignore
if result is not NotImplemented:
return result
# Fall back to cirq approx_eq for remaining types.
return approx_eq(val, other, atol=atol)
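# Usage sketch for the function above (assumes cirq is installed; the function
# is re-exported publicly as cirq.equal_up_to_global_phase):
import numpy as np
import cirq

a = np.array([1.0, 1.0j])
b = 1.0j * a  # differs from `a` only by the global phase i
assert cirq.equal_up_to_global_phase(a, b)
assert not cirq.equal_up_to_global_phase(a, np.array([1.0, -1.0j]))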
| 49
| 100
|
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numbers
from collections.abc import Iterable
from typing import Any, Union
import numpy as np
from typing_extensions import Protocol
from cirq import linalg
from cirq._doc import doc_private
from cirq.protocols.approximate_equality_protocol import approx_eq
class SupportsEqualUpToGlobalPhase(Protocol):
"""Object which can be compared for equality mod global phase."""
@doc_private
def _equal_up_to_global_phase_(self, other: Any, *, atol: Union[int, float]) -> bool:
"""Approximate comparator.
Types implementing this protocol define their own logic for comparison
with other types.
Args:
other: Target object for comparison of equality up to global phase.
atol: The minimum absolute tolerance. See `np.isclose()`
documentation for details.
Returns:
True if objects are equal up to a global phase, False otherwise.
Returns NotImplemented when checking equality up to a global phase
is not implemented for given types.
"""
def equal_up_to_global_phase(val: Any, other: Any, *, atol: Union[int, float] = 1e-8) -> bool:
"""Determine whether two objects are equal up to global phase.
If `val` implements a `_equal_up_to_global_phase_` method then it is
invoked and takes precedence over all other checks:
- For complex primitive types, the magnitudes of the values are compared.
- For `val` and `other` both iterable of the same length, consecutive
elements are compared recursively. The types of `val` and `other` do not
need to match; they just need to be iterable and have the same structure.
- For all other types, fall back to `_approx_eq_`
Args:
val: Source object for approximate comparison.
other: Target object for approximate comparison.
atol: The minimum absolute tolerance. This places an upper bound on
the differences in *magnitudes* of two compared complex numbers.
Returns:
True if objects are approximately equal up to phase, False otherwise.
"""
# Attempt _equal_up_to_global_phase_ for val.
eq_up_to_phase_getter = getattr(val, '_equal_up_to_global_phase_', None)
if eq_up_to_phase_getter is not None:
result = eq_up_to_phase_getter(other, atol=atol)
if result is not NotImplemented:
return result
# Fall back to _equal_up_to_global_phase_ for other.
other_eq_up_to_phase_getter = getattr(other, '_equal_up_to_global_phase_', None)
if other_eq_up_to_phase_getter is not None:
result = other_eq_up_to_phase_getter(val, atol=atol)
if result is not NotImplemented:
return result
# Fall back to special check for numeric arrays.
# Defer to numpy automatic type casting to determine numeric type.
if isinstance(val, Iterable) and isinstance(other, Iterable):
a = np.asarray(val)
b = np.asarray(other)
if a.dtype.kind in 'uifc' and b.dtype.kind in 'uifc':
return linalg.allclose_up_to_global_phase(a, b, atol=atol)
# Fall back to approx_eq to compare the magnitudes of two numbers.
if isinstance(val, numbers.Number) and isinstance(other, numbers.Number):
result = approx_eq(abs(val), abs(other), atol=atol) # type: ignore
if result is not NotImplemented:
return result
# Fall back to cirq approx_eq for remaining types.
return approx_eq(val, other, atol=atol)
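# Note on the numeric fallback above: for plain scalars only magnitudes are
# compared, so any two complex numbers on the same circle match (a sketch,
# assuming cirq is installed):
import cirq
assert cirq.equal_up_to_global_phase(1j, -1j)
assert not cirq.equal_up_to_global_phase(1.0, 2.0)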
|
load_checkpoint
|
Returns `CheckpointReader` for checkpoint found in `ckpt_dir_or_file`.
If `ckpt_dir_or_file` resolves to a directory with multiple checkpoints,
a reader for the latest checkpoint is returned.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint
file.
Returns:
`CheckpointReader` object.
Raises:
ValueError: If `ckpt_dir_or_file` resolves to a directory with no
checkpoints.
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to work with checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import six
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import py_checkpoint_reader
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"load_checkpoint", "load_variable", "list_variables",
"checkpoints_iterator", "init_from_checkpoint"
]
# MASKED: load_checkpoint function (lines 45-67)
@tf_export("train.load_variable")
def load_variable(ckpt_dir_or_file, name):
"""Returns the tensor value of the given variable in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
name: Name of the variable to return.
Returns:
A numpy `ndarray` with a copy of the value of this variable.
"""
# TODO(b/29227106): Fix this in the right place and remove this.
if name.endswith(":0"):
name = name[:-2]
reader = load_checkpoint(ckpt_dir_or_file)
return reader.get_tensor(name)
@tf_export("train.list_variables")
def list_variables(ckpt_dir_or_file):
"""Returns list of all variables in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
Returns:
List of tuples `(name, shape)`.
"""
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
names = sorted(variable_map.keys())
result = []
for name in names:
result.append((name, variable_map[name]))
return result
def wait_for_new_checkpoint(checkpoint_dir,
last_checkpoint=None,
seconds_to_sleep=1,
timeout=None):
"""Waits until a new checkpoint file is found.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
last_checkpoint: The last checkpoint path used or `None` if we're expecting
a checkpoint for the first time.
seconds_to_sleep: The number of seconds to sleep for before looking for a
new checkpoint.
timeout: The maximum number of seconds to wait. If left as `None`, then the
process will wait indefinitely.
Returns:
a new checkpoint path, or None if the timeout was reached.
"""
logging.info("Waiting for new checkpoint at %s", checkpoint_dir)
stop_time = time.time() + timeout if timeout is not None else None
while True:
checkpoint_path = checkpoint_management.latest_checkpoint(checkpoint_dir)
if checkpoint_path is None or checkpoint_path == last_checkpoint:
if stop_time is not None and time.time() + seconds_to_sleep > stop_time:
return None
time.sleep(seconds_to_sleep)
else:
logging.info("Found new checkpoint at %s", checkpoint_path)
return checkpoint_path
@tf_export("train.checkpoints_iterator")
def checkpoints_iterator(checkpoint_dir,
min_interval_secs=0,
timeout=None,
timeout_fn=None):
"""Continuously yield new checkpoint files as they appear.
The iterator only checks for new checkpoints when control flow has been
reverted to it. This means it can miss checkpoints if your code takes longer
to run between iterations than `min_interval_secs` or the interval at which
new checkpoints are written.
The `timeout` argument is the maximum number of seconds to block waiting for
a new checkpoint. It is used in combination with the `timeout_fn` as
follows:
* If the timeout expires and no `timeout_fn` was specified, the iterator
stops yielding.
* If a `timeout_fn` was specified, that function is called and if it returns
a true boolean value the iterator stops yielding.
* If the function returns a false boolean value then the iterator resumes the
wait for new checkpoints. At this point the timeout logic applies again.
This behavior gives control to callers on what to do if checkpoints do not
come fast enough or stop being generated. For example, if callers have a way
to detect that the training has stopped and know that no new checkpoints
will be generated, they can provide a `timeout_fn` that returns `True` when
the training has stopped. If they know that the training is still going on
they return `False` instead.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
min_interval_secs: The minimum number of seconds between yielding
checkpoints.
timeout: The maximum number of seconds to wait between checkpoints. If left
as `None`, then the process will wait indefinitely.
timeout_fn: Optional function to call after a timeout. If the function
returns True, then it means that no new checkpoints will be generated and
the iterator will exit. The function is called with no arguments.
Yields:
String paths to latest checkpoint files as they arrive.
"""
checkpoint_path = None
while True:
new_checkpoint_path = wait_for_new_checkpoint(
checkpoint_dir, checkpoint_path, timeout=timeout)
if new_checkpoint_path is None:
if not timeout_fn:
# timed out
logging.info("Timed-out waiting for a checkpoint.")
return
if timeout_fn():
# The timeout_fn indicated that we are truly done.
return
else:
# The timeout_fn indicated that more checkpoints may come.
continue
start = time.time()
checkpoint_path = new_checkpoint_path
yield checkpoint_path
time_to_next_eval = start + min_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
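# Usage sketch (hypothetical directory; the loop blocks until new checkpoints
# appear under /tmp/train_dir, so it is shown commented out):
# for ckpt_path in checkpoints_iterator("/tmp/train_dir", timeout=60,
#                                       timeout_fn=lambda: True):
#     print("evaluating", ckpt_path)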
@tf_export(v1=["train.init_from_checkpoint"])
def init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""Replaces `tf.Variable` initializers so they load from a checkpoint file.
Values are not loaded immediately, but when the initializer is run
(typically by running a `tf.compat.v1.global_variables_initializer` op).
Note: This overrides default initialization ops of specified variables and
redefines dtype.
Assignment map supports following syntax:
* `'checkpoint_scope_name/': 'scope_name/'` - will load all variables in
current `scope_name` from `checkpoint_scope_name` with matching tensor
names.
* `'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` -
will initialize `scope_name/variable_name` variable
from `checkpoint_scope_name/some_other_variable`.
* `'scope_variable_name': variable` - will initialize given `tf.Variable`
object with tensor 'scope_variable_name' from the checkpoint.
* `'scope_variable_name': list(variable)` - will initialize list of
partitioned variables with tensor 'scope_variable_name' from the checkpoint.
* `'/': 'scope_name/'` - will load all variables in current `scope_name` from
checkpoint's root (e.g. no scope).
Supports loading into partitioned variables, which are represented as
`'<variable>/part_<part #>'`.
Example:
```python
# Say, '/tmp/model.ckpt' has the following tensors:
# -- name='old_scope_1/var1', shape=[20, 2]
# -- name='old_scope_1/var2', shape=[50, 4]
# -- name='old_scope_2/var3', shape=[100, 100]
# Create new model's variables
with tf.compat.v1.variable_scope('new_scope_1'):
var1 = tf.compat.v1.get_variable('var1', shape=[20, 2],
initializer=tf.compat.v1.zeros_initializer())
with tf.compat.v1.variable_scope('new_scope_2'):
var2 = tf.compat.v1.get_variable('var2', shape=[50, 4],
initializer=tf.compat.v1.zeros_initializer())
# Partition into 5 variables along the first axis.
var3 = tf.compat.v1.get_variable(name='var3', shape=[100, 100],
initializer=tf.compat.v1.zeros_initializer(),
partitioner=lambda shape, dtype: [5, 1])
# Initialize all variables in `new_scope_1` from `old_scope_1`.
init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/': 'new_scope_1'})
# Use names to specify which variables to initialize from checkpoint.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': 'new_scope_1/var1',
'old_scope_1/var2': 'new_scope_2/var2'})
# Or use tf.Variable objects to identify what to initialize.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': var1,
'old_scope_1/var2': var2})
# Initialize partitioned variables using variable's name
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': 'new_scope_2/var3'})
# Or specify the list of tf.Variable objects.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': var3._get_variable_list()})
```
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
assignment_map: Dict, where keys are names of the variables in the
checkpoint and values are current variables or names of current variables
(in default graph).
Raises:
ValueError: If missing variables in current graph, or if missing
checkpoints or tensors in checkpoints.
"""
init_from_checkpoint_fn = lambda _: _init_from_checkpoint(
ckpt_dir_or_file, assignment_map)
if distribution_strategy_context.get_cross_replica_context():
init_from_checkpoint_fn(None)
else:
distribution_strategy_context.get_replica_context().merge_call(
init_from_checkpoint_fn)
def _init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""See `init_from_checkpoint` for documentation."""
ckpt_file = _get_checkpoint_filename(ckpt_dir_or_file)
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
for tensor_name_in_ckpt, current_var_or_name in sorted(
six.iteritems(assignment_map)):
var = None
# Check if this is Variable object or list of Variable objects (in case of
# partitioned variables).
if _is_variable(current_var_or_name) or (
isinstance(current_var_or_name, list)
and all(_is_variable(v) for v in current_var_or_name)):
var = current_var_or_name
else:
store_vars = vs._get_default_variable_store()._vars # pylint:disable=protected-access
# Check if this variable is in var_store.
var = store_vars.get(current_var_or_name, None)
# Also check if variable is partitioned as list.
if var is None:
var = _collect_partitioned_variable(current_var_or_name, store_vars)
if var is not None:
# If 1 to 1 mapping was provided, find variable in the checkpoint.
if tensor_name_in_ckpt not in variable_map:
raise ValueError("Tensor %s is not found in %s checkpoint %s" % (
tensor_name_in_ckpt, ckpt_dir_or_file, variable_map
))
if _is_variable(var):
# Additional at-call-time checks.
if not var.get_shape().is_compatible_with(
variable_map[tensor_name_in_ckpt]):
raise ValueError(
"Shape of variable %s (%s) doesn't match with shape of "
"tensor %s (%s) from checkpoint reader." % (
var.name, str(var.get_shape()),
tensor_name_in_ckpt, str(variable_map[tensor_name_in_ckpt])
))
var_name = var.name
else:
var_name = ",".join([v.name for v in var])
_set_variable_or_list_initializer(var, ckpt_file, tensor_name_in_ckpt)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, tensor_name_in_ckpt)
else:
scopes = ""
# TODO(vihanjain): Support list of 'current_var_or_name' here.
if "/" in current_var_or_name:
scopes = current_var_or_name[:current_var_or_name.rindex("/")]
if not tensor_name_in_ckpt.endswith("/"):
raise ValueError(
"Assignment map with scope only name {} should map to scope only "
"{}. Should be 'scope/': 'other_scope/'.".format(
scopes, tensor_name_in_ckpt))
# If scope to scope mapping was provided, find all variables in the scope
# and create variable to variable mapping.
scope_variables = set()
for var_name in store_vars:
if not scopes or var_name.startswith(scopes + "/"):
# Consume /part_ if partitioned variable.
if "/part_" in var_name:
var_name = var_name[:var_name.index("/part_")]
scope_variables.add(var_name)
for var_name in sorted(scope_variables):
# Lookup name with specified prefix and suffix from current variable.
# If tensor_name given is '/' (root), don't use it for full name.
full_tensor_name = var_name[len(scopes):]
if current_var_or_name != "/":
full_tensor_name = full_tensor_name[1:]
if tensor_name_in_ckpt != "/":
full_tensor_name = tensor_name_in_ckpt + full_tensor_name
# Remove trailing '/', if any, in the full_tensor_name
if full_tensor_name.endswith("/"):
full_tensor_name = full_tensor_name[:-1]
if full_tensor_name not in variable_map:
raise ValueError(
"Tensor %s (%s in %s) is not found in %s checkpoint" % (
full_tensor_name, var_name[len(scopes) + 1:],
tensor_name_in_ckpt, ckpt_dir_or_file
))
var = store_vars.get(var_name, None)
if var is None:
var = _collect_partitioned_variable(var_name, store_vars)
_set_variable_or_list_initializer(var, ckpt_file, full_tensor_name)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, full_tensor_name)
def _get_checkpoint_filename(ckpt_dir_or_file):
"""Returns checkpoint filename given directory or specific checkpoint file."""
if gfile.IsDirectory(ckpt_dir_or_file):
return checkpoint_management.latest_checkpoint(ckpt_dir_or_file)
return ckpt_dir_or_file
def _set_checkpoint_initializer(variable,
ckpt_file,
tensor_name,
slice_spec,
name="checkpoint_initializer",
# +++ DIT: default write_version=saver_pb2.SaverDef.DIT
write_version=saver_pb2.SaverDef.DIT):
# --- DIT: default write_version=saver_pb2.SaverDef.DIT
"""Overrides given variable's initialization op.
Sets variable initializer to assign op that initializes variable from tensor's
value in the checkpoint.
Args:
variable: `tf.Variable` object.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
slice_spec: Slice specification for loading partitioned tensors.
name: Name of the operation.
write_version: Checkpoint format version (`SaverDef.V1`, `V2`, or `DIT`)
used to select the matching restore op.
"""
base_type = variable.dtype.base_dtype
# Do not colocate with variable since RestoreV2 op only runs on CPU and
# colocation will force variable (and other ops that colocate with variable)
# to be on CPU as well. It is okay to place the variable's initializer op on
# CPU since it will only be run once at the start.
with ops.device(variable.device), ops.device("/cpu:0"):
#restore_op = io_ops.restore_v2(
# ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
# +++ DIT: check for restore_dit
if write_version == saver_pb2.SaverDef.V1 or write_version == saver_pb2.SaverDef.V2:
restore_op = io_ops.restore_v2(ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
elif write_version == saver_pb2.SaverDef.DIT:
restore_op = io_ops.restore_dit(ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
else:
raise RuntimeError("Unexpected write_version: " + str(write_version))
# --- DIT: check for restore_dit
names_to_saveables = saveable_object_util.op_list_to_dict([variable])
saveable_objects = []
for name, op in names_to_saveables.items():
for s in saveable_object_util.saveable_objects_for_op(op, name):
saveable_objects.append(s)
assert len(saveable_objects) == 1 # Should be only one variable.
init_op = saveable_objects[0].restore([restore_op], restored_shapes=None)
# pylint:disable=protected-access
variable._initializer_op = init_op
restore_op.set_shape(variable.shape)
variable._initial_value = restore_op
# pylint:enable=protected-access
def _set_variable_or_list_initializer(variable_or_list, ckpt_file,
tensor_name):
"""Overrides initialization op of given variable or list of variables.
Calls `_set_checkpoint_initializer` for each variable in the given list of
variables.
Args:
variable_or_list: `tf.Variable` object or a list of `tf.Variable` objects.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
Raises:
ValueError: if all objects in `variable_or_list` are not partitions of the
same large variable.
"""
if isinstance(variable_or_list, (list, tuple)):
# A set of slices.
slice_name = None
for v in variable_or_list:
slice_info = v._save_slice_info # pylint:disable=protected-access
if slice_name is None:
slice_name = slice_info.full_name
elif slice_name != slice_info.full_name:
raise ValueError("Slices must all be from the same tensor: %s != %s" %
(slice_name, slice_info.full_name))
_set_checkpoint_initializer(v, ckpt_file, tensor_name, slice_info.spec)
else:
_set_checkpoint_initializer(variable_or_list, ckpt_file, tensor_name, "")
def _is_variable(x):
return (isinstance(x, variables.Variable) or
resource_variable_ops.is_resource_variable(x))
def _collect_partitioned_variable(name, all_vars):
"""Returns list of `tf.Variable` that comprise the partitioned variable."""
if name + "/part_0" in all_vars:
var = []
i = 0
while name + "/part_%d" % i in all_vars:
var.append(all_vars[name + "/part_%d" % i])
i += 1
return var
return None
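# Self-contained check of the partition-collection logic above; the dict
# stands in for the variable store and the string values are placeholders:
_demo_vars = {"w/part_0": "v0", "w/part_1": "v1"}
assert _collect_partitioned_variable("w", _demo_vars) == ["v0", "v1"]
assert _collect_partitioned_variable("missing", _demo_vars) is None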
|
@tf_export("train.load_checkpoint")
def load_checkpoint(ckpt_dir_or_file):
"""Returns `CheckpointReader` for checkpoint found in `ckpt_dir_or_file`.
If `ckpt_dir_or_file` resolves to a directory with multiple checkpoints,
a reader for the latest checkpoint is returned.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint
file.
Returns:
`CheckpointReader` object.
Raises:
ValueError: If `ckpt_dir_or_file` resolves to a directory with no
checkpoints.
"""
filename = _get_checkpoint_filename(ckpt_dir_or_file)
if filename is None:
raise ValueError("Couldn't find 'checkpoint' file or checkpoints in "
"given directory %s" % ckpt_dir_or_file)
return py_checkpoint_reader.NewCheckpointReader(filename)
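# Usage sketch (hypothetical checkpoint directory; requires that a checkpoint
# actually exists under /tmp/model_dir, so it is shown commented out):
# reader = load_checkpoint("/tmp/model_dir")
# for name, shape in list_variables("/tmp/model_dir"):
#     print(name, shape)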
| 45
| 67
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to work with checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import six
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import py_checkpoint_reader
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"load_checkpoint", "load_variable", "list_variables",
"checkpoints_iterator", "init_from_checkpoint"
]
@tf_export("train.load_checkpoint")
def load_checkpoint(ckpt_dir_or_file):
"""Returns `CheckpointReader` for checkpoint found in `ckpt_dir_or_file`.
If `ckpt_dir_or_file` resolves to a directory with multiple checkpoints,
a reader for the latest checkpoint is returned.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint
file.
Returns:
`CheckpointReader` object.
Raises:
ValueError: If `ckpt_dir_or_file` resolves to a directory with no
checkpoints.
"""
filename = _get_checkpoint_filename(ckpt_dir_or_file)
if filename is None:
raise ValueError("Couldn't find 'checkpoint' file or checkpoints in "
"given directory %s" % ckpt_dir_or_file)
return py_checkpoint_reader.NewCheckpointReader(filename)
@tf_export("train.load_variable")
def load_variable(ckpt_dir_or_file, name):
"""Returns the tensor value of the given variable in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
name: Name of the variable to return.
Returns:
A numpy `ndarray` with a copy of the value of this variable.
"""
# TODO(b/29227106): Fix this in the right place and remove this.
if name.endswith(":0"):
name = name[:-2]
reader = load_checkpoint(ckpt_dir_or_file)
return reader.get_tensor(name)
@tf_export("train.list_variables")
def list_variables(ckpt_dir_or_file):
"""Returns list of all variables in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
Returns:
List of tuples `(name, shape)`.
"""
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
names = sorted(variable_map.keys())
result = []
for name in names:
result.append((name, variable_map[name]))
return result
def wait_for_new_checkpoint(checkpoint_dir,
last_checkpoint=None,
seconds_to_sleep=1,
timeout=None):
"""Waits until a new checkpoint file is found.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
last_checkpoint: The last checkpoint path used or `None` if we're expecting
a checkpoint for the first time.
seconds_to_sleep: The number of seconds to sleep for before looking for a
new checkpoint.
timeout: The maximum number of seconds to wait. If left as `None`, then the
process will wait indefinitely.
Returns:
a new checkpoint path, or None if the timeout was reached.
"""
logging.info("Waiting for new checkpoint at %s", checkpoint_dir)
stop_time = time.time() + timeout if timeout is not None else None
while True:
checkpoint_path = checkpoint_management.latest_checkpoint(checkpoint_dir)
if checkpoint_path is None or checkpoint_path == last_checkpoint:
if stop_time is not None and time.time() + seconds_to_sleep > stop_time:
return None
time.sleep(seconds_to_sleep)
else:
logging.info("Found new checkpoint at %s", checkpoint_path)
return checkpoint_path
@tf_export("train.checkpoints_iterator")
def checkpoints_iterator(checkpoint_dir,
min_interval_secs=0,
timeout=None,
timeout_fn=None):
"""Continuously yield new checkpoint files as they appear.
The iterator only checks for new checkpoints when control flow has been
reverted to it. This means it can miss checkpoints if your code takes longer
to run between iterations than `min_interval_secs` or the interval at which
new checkpoints are written.
The `timeout` argument is the maximum number of seconds to block waiting for
a new checkpoint. It is used in combination with the `timeout_fn` as
follows:
* If the timeout expires and no `timeout_fn` was specified, the iterator
stops yielding.
* If a `timeout_fn` was specified, that function is called and if it returns
a true boolean value the iterator stops yielding.
* If the function returns a false boolean value then the iterator resumes the
wait for new checkpoints. At this point the timeout logic applies again.
This behavior gives control to callers on what to do if checkpoints do not
come fast enough or stop being generated. For example, if callers have a way
to detect that the training has stopped and know that no new checkpoints
will be generated, they can provide a `timeout_fn` that returns `True` when
the training has stopped. If they know that the training is still going on
they return `False` instead.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
min_interval_secs: The minimum number of seconds between yielding
checkpoints.
timeout: The maximum number of seconds to wait between checkpoints. If left
as `None`, then the process will wait indefinitely.
timeout_fn: Optional function to call after a timeout. If the function
returns True, then it means that no new checkpoints will be generated and
the iterator will exit. The function is called with no arguments.
Yields:
String paths to latest checkpoint files as they arrive.
"""
checkpoint_path = None
while True:
new_checkpoint_path = wait_for_new_checkpoint(
checkpoint_dir, checkpoint_path, timeout=timeout)
if new_checkpoint_path is None:
if not timeout_fn:
# timed out
logging.info("Timed-out waiting for a checkpoint.")
return
if timeout_fn():
# The timeout_fn indicated that we are truly done.
return
else:
# The timeout_fn indicated that more checkpoints may come.
continue
start = time.time()
checkpoint_path = new_checkpoint_path
yield checkpoint_path
time_to_next_eval = start + min_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
@tf_export(v1=["train.init_from_checkpoint"])
def init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""Replaces `tf.Variable` initializers so they load from a checkpoint file.
Values are not loaded immediately, but when the initializer is run
(typically by running a `tf.compat.v1.global_variables_initializer` op).
Note: This overrides default initialization ops of specified variables and
redefines dtype.
Assignment map supports following syntax:
* `'checkpoint_scope_name/': 'scope_name/'` - will load all variables in
current `scope_name` from `checkpoint_scope_name` with matching tensor
names.
* `'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` -
will initialize `scope_name/variable_name` variable
from `checkpoint_scope_name/some_other_variable`.
* `'scope_variable_name': variable` - will initialize given `tf.Variable`
object with tensor 'scope_variable_name' from the checkpoint.
* `'scope_variable_name': list(variable)` - will initialize list of
partitioned variables with tensor 'scope_variable_name' from the checkpoint.
* `'/': 'scope_name/'` - will load all variables in current `scope_name` from
checkpoint's root (e.g. no scope).
Supports loading into partitioned variables, which are represented as
`'<variable>/part_<part #>'`.
Example:
```python
# Say, '/tmp/model.ckpt' has the following tensors:
# -- name='old_scope_1/var1', shape=[20, 2]
# -- name='old_scope_1/var2', shape=[50, 4]
# -- name='old_scope_2/var3', shape=[100, 100]
# Create new model's variables
with tf.compat.v1.variable_scope('new_scope_1'):
var1 = tf.compat.v1.get_variable('var1', shape=[20, 2],
initializer=tf.compat.v1.zeros_initializer())
with tf.compat.v1.variable_scope('new_scope_2'):
var2 = tf.compat.v1.get_variable('var2', shape=[50, 4],
initializer=tf.compat.v1.zeros_initializer())
# Partition into 5 variables along the first axis.
var3 = tf.compat.v1.get_variable(name='var3', shape=[100, 100],
initializer=tf.compat.v1.zeros_initializer(),
partitioner=lambda shape, dtype: [5, 1])
# Initialize all variables in `new_scope_1` from `old_scope_1`.
init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/': 'new_scope_1'})
# Use names to specify which variables to initialize from checkpoint.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': 'new_scope_1/var1',
'old_scope_1/var2': 'new_scope_2/var2'})
# Or use tf.Variable objects to identify what to initialize.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': var1,
'old_scope_1/var2': var2})
# Initialize partitioned variables using variable's name
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': 'new_scope_2/var3'})
# Or specify the list of tf.Variable objects.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': var3._get_variable_list()})
```
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
assignment_map: Dict, where keys are names of the variables in the
checkpoint and values are current variables or names of current variables
(in default graph).
Raises:
ValueError: If missing variables in current graph, or if missing
checkpoints or tensors in checkpoints.
"""
init_from_checkpoint_fn = lambda _: _init_from_checkpoint(
ckpt_dir_or_file, assignment_map)
if distribution_strategy_context.get_cross_replica_context():
init_from_checkpoint_fn(None)
else:
distribution_strategy_context.get_replica_context().merge_call(
init_from_checkpoint_fn)
def _init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""See `init_from_checkpoint` for documentation."""
ckpt_file = _get_checkpoint_filename(ckpt_dir_or_file)
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
for tensor_name_in_ckpt, current_var_or_name in sorted(
six.iteritems(assignment_map)):
var = None
# Check if this is Variable object or list of Variable objects (in case of
# partitioned variables).
if _is_variable(current_var_or_name) or (
isinstance(current_var_or_name, list)
and all(_is_variable(v) for v in current_var_or_name)):
var = current_var_or_name
else:
store_vars = vs._get_default_variable_store()._vars # pylint:disable=protected-access
# Check if this variable is in var_store.
var = store_vars.get(current_var_or_name, None)
# Also check if variable is partitioned as list.
if var is None:
var = _collect_partitioned_variable(current_var_or_name, store_vars)
if var is not None:
# If 1 to 1 mapping was provided, find variable in the checkpoint.
if tensor_name_in_ckpt not in variable_map:
raise ValueError("Tensor %s is not found in %s checkpoint %s" % (
tensor_name_in_ckpt, ckpt_dir_or_file, variable_map
))
if _is_variable(var):
# Additional at-call-time checks.
if not var.get_shape().is_compatible_with(
variable_map[tensor_name_in_ckpt]):
raise ValueError(
"Shape of variable %s (%s) doesn't match with shape of "
"tensor %s (%s) from checkpoint reader." % (
var.name, str(var.get_shape()),
tensor_name_in_ckpt, str(variable_map[tensor_name_in_ckpt])
))
var_name = var.name
else:
var_name = ",".join([v.name for v in var])
_set_variable_or_list_initializer(var, ckpt_file, tensor_name_in_ckpt)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, tensor_name_in_ckpt)
else:
scopes = ""
# TODO(vihanjain): Support list of 'current_var_or_name' here.
if "/" in current_var_or_name:
scopes = current_var_or_name[:current_var_or_name.rindex("/")]
if not tensor_name_in_ckpt.endswith("/"):
raise ValueError(
"Assignment map with scope only name {} should map to scope only "
"{}. Should be 'scope/': 'other_scope/'.".format(
scopes, tensor_name_in_ckpt))
# If scope to scope mapping was provided, find all variables in the scope
# and create variable to variable mapping.
scope_variables = set()
for var_name in store_vars:
if not scopes or var_name.startswith(scopes + "/"):
# Consume /part_ if partitioned variable.
if "/part_" in var_name:
var_name = var_name[:var_name.index("/part_")]
scope_variables.add(var_name)
for var_name in sorted(scope_variables):
# Lookup name with specified prefix and suffix from current variable.
# If tensor_name given is '/' (root), don't use it for full name.
full_tensor_name = var_name[len(scopes):]
if current_var_or_name != "/":
full_tensor_name = full_tensor_name[1:]
if tensor_name_in_ckpt != "/":
full_tensor_name = tensor_name_in_ckpt + full_tensor_name
# Remove trailing '/', if any, in the full_tensor_name
if full_tensor_name.endswith("/"):
full_tensor_name = full_tensor_name[:-1]
if full_tensor_name not in variable_map:
raise ValueError(
"Tensor %s (%s in %s) is not found in %s checkpoint" % (
full_tensor_name, var_name[len(scopes) + 1:],
tensor_name_in_ckpt, ckpt_dir_or_file
))
var = store_vars.get(var_name, None)
if var is None:
var = _collect_partitioned_variable(var_name, store_vars)
_set_variable_or_list_initializer(var, ckpt_file, full_tensor_name)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, full_tensor_name)
def _get_checkpoint_filename(ckpt_dir_or_file):
"""Returns checkpoint filename given directory or specific checkpoint file."""
if gfile.IsDirectory(ckpt_dir_or_file):
return checkpoint_management.latest_checkpoint(ckpt_dir_or_file)
return ckpt_dir_or_file
def _set_checkpoint_initializer(variable,
ckpt_file,
tensor_name,
slice_spec,
name="checkpoint_initializer",
# +++ DIT: default write_version=saver_pb2.SaverDef.DIT
write_version=saver_pb2.SaverDef.DIT):
# --- DIT: default write_version=saver_pb2.SaverDef.DIT
"""Overrides given variable's initialization op.
Sets variable initializer to assign op that initializes variable from tensor's
value in the checkpoint.
Args:
variable: `tf.Variable` object.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
slice_spec: Slice specification for loading partitioned tensors.
    name: Name of the operation.
    write_version: Checkpoint format version from `saver_pb2.SaverDef`; V1 and
      V2 restore via `restore_v2`, DIT restores via the fork's `restore_dit`.
"""
base_type = variable.dtype.base_dtype
# Do not colocate with variable since RestoreV2 op only runs on CPU and
# colocation will force variable (and other ops that colocate with variable)
# to be on CPU as well. It is okay to place the variable's initializer op on
# CPU since it will only be run once at the start.
with ops.device(variable.device), ops.device("/cpu:0"):
#restore_op = io_ops.restore_v2(
# ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
# +++ DIT: check for restore_dit
    # Dispatch on the `write_version` argument; this is a module-level
    # function, so there is no `self` here.
    if write_version == saver_pb2.SaverDef.V1 or write_version == saver_pb2.SaverDef.V2:
      restore_op = io_ops.restore_v2(ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
    elif write_version == saver_pb2.SaverDef.DIT:
      restore_op = io_ops.restore_dit(ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
    else:
      raise RuntimeError("Unexpected write_version: %s" % write_version)
# --- DIT: check for restore_dit
names_to_saveables = saveable_object_util.op_list_to_dict([variable])
saveable_objects = []
for name, op in names_to_saveables.items():
for s in saveable_object_util.saveable_objects_for_op(op, name):
saveable_objects.append(s)
assert len(saveable_objects) == 1 # Should be only one variable.
init_op = saveable_objects[0].restore([restore_op], restored_shapes=None)
# pylint:disable=protected-access
variable._initializer_op = init_op
restore_op.set_shape(variable.shape)
variable._initial_value = restore_op
# pylint:enable=protected-access
def _set_variable_or_list_initializer(variable_or_list, ckpt_file,
tensor_name):
"""Overrides initialization op of given variable or list of variables.
Calls `_set_checkpoint_initializer` for each variable in the given list of
variables.
Args:
variable_or_list: `tf.Variable` object or a list of `tf.Variable` objects.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
Raises:
ValueError: if all objects in `variable_or_list` are not partitions of the
same large variable.
"""
if isinstance(variable_or_list, (list, tuple)):
# A set of slices.
slice_name = None
for v in variable_or_list:
slice_info = v._save_slice_info # pylint:disable=protected-access
if slice_name is None:
slice_name = slice_info.full_name
elif slice_name != slice_info.full_name:
raise ValueError("Slices must all be from the same tensor: %s != %s" %
(slice_name, slice_info.full_name))
_set_checkpoint_initializer(v, ckpt_file, tensor_name, slice_info.spec)
else:
_set_checkpoint_initializer(variable_or_list, ckpt_file, tensor_name, "")
def _is_variable(x):
return (isinstance(x, variables.Variable) or
resource_variable_ops.is_resource_variable(x))
def _collect_partitioned_variable(name, all_vars):
"""Returns list of `tf.Variable` that comprise the partitioned variable."""
if name + "/part_0" in all_vars:
var = []
i = 0
while name + "/part_%d" % i in all_vars:
var.append(all_vars[name + "/part_%d" % i])
i += 1
return var
return None
|
load_variable
|
Returns the tensor value of the given variable in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
name: Name of the variable to return.
Returns:
A numpy `ndarray` with a copy of the value of this variable.
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to work with checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import six
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import py_checkpoint_reader
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"load_checkpoint", "load_variable", "list_variables",
"checkpoints_iterator", "init_from_checkpoint"
]
@tf_export("train.load_checkpoint")
def load_checkpoint(ckpt_dir_or_file):
"""Returns `CheckpointReader` for checkpoint found in `ckpt_dir_or_file`.
If `ckpt_dir_or_file` resolves to a directory with multiple checkpoints,
  a reader for the latest checkpoint is returned.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint
file.
Returns:
`CheckpointReader` object.
Raises:
ValueError: If `ckpt_dir_or_file` resolves to a directory with no
checkpoints.
"""
filename = _get_checkpoint_filename(ckpt_dir_or_file)
if filename is None:
raise ValueError("Couldn't find 'checkpoint' file or checkpoints in "
"given directory %s" % ckpt_dir_or_file)
return py_checkpoint_reader.NewCheckpointReader(filename)
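# Editor's sketch (not part of the original source): minimal use of
# `load_checkpoint`; '/tmp/model.ckpt' is a hypothetical checkpoint prefix.
def _example_load_checkpoint():
  reader = load_checkpoint('/tmp/model.ckpt')
  # The reader exposes a name -> shape map of every saved tensor.
  for tensor_name, shape in reader.get_variable_to_shape_map().items():
    print(tensor_name, shape)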
# MASKED: load_variable function (lines 70-85)
@tf_export("train.list_variables")
def list_variables(ckpt_dir_or_file):
"""Returns list of all variables in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
Returns:
List of tuples `(name, shape)`.
"""
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
names = sorted(variable_map.keys())
result = []
for name in names:
result.append((name, variable_map[name]))
return result
def wait_for_new_checkpoint(checkpoint_dir,
last_checkpoint=None,
seconds_to_sleep=1,
timeout=None):
"""Waits until a new checkpoint file is found.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
last_checkpoint: The last checkpoint path used or `None` if we're expecting
a checkpoint for the first time.
seconds_to_sleep: The number of seconds to sleep for before looking for a
new checkpoint.
timeout: The maximum number of seconds to wait. If left as `None`, then the
process will wait indefinitely.
Returns:
a new checkpoint path, or None if the timeout was reached.
"""
logging.info("Waiting for new checkpoint at %s", checkpoint_dir)
stop_time = time.time() + timeout if timeout is not None else None
while True:
checkpoint_path = checkpoint_management.latest_checkpoint(checkpoint_dir)
if checkpoint_path is None or checkpoint_path == last_checkpoint:
if stop_time is not None and time.time() + seconds_to_sleep > stop_time:
return None
time.sleep(seconds_to_sleep)
else:
logging.info("Found new checkpoint at %s", checkpoint_path)
return checkpoint_path
@tf_export("train.checkpoints_iterator")
def checkpoints_iterator(checkpoint_dir,
min_interval_secs=0,
timeout=None,
timeout_fn=None):
"""Continuously yield new checkpoint files as they appear.
The iterator only checks for new checkpoints when control flow has been
reverted to it. This means it can miss checkpoints if your code takes longer
to run between iterations than `min_interval_secs` or the interval at which
new checkpoints are written.
The `timeout` argument is the maximum number of seconds to block waiting for
a new checkpoint. It is used in combination with the `timeout_fn` as
follows:
* If the timeout expires and no `timeout_fn` was specified, the iterator
stops yielding.
* If a `timeout_fn` was specified, that function is called and if it returns
a true boolean value the iterator stops yielding.
* If the function returns a false boolean value then the iterator resumes the
wait for new checkpoints. At this point the timeout logic applies again.
This behavior gives control to callers on what to do if checkpoints do not
come fast enough or stop being generated. For example, if callers have a way
to detect that the training has stopped and know that no new checkpoints
will be generated, they can provide a `timeout_fn` that returns `True` when
the training has stopped. If they know that the training is still going on
they return `False` instead.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
min_interval_secs: The minimum number of seconds between yielding
checkpoints.
timeout: The maximum number of seconds to wait between checkpoints. If left
as `None`, then the process will wait indefinitely.
timeout_fn: Optional function to call after a timeout. If the function
returns True, then it means that no new checkpoints will be generated and
the iterator will exit. The function is called with no arguments.
Yields:
String paths to latest checkpoint files as they arrive.
"""
checkpoint_path = None
while True:
new_checkpoint_path = wait_for_new_checkpoint(
checkpoint_dir, checkpoint_path, timeout=timeout)
if new_checkpoint_path is None:
if not timeout_fn:
# timed out
logging.info("Timed-out waiting for a checkpoint.")
return
if timeout_fn():
# The timeout_fn indicated that we are truly done.
return
else:
# The timeout_fn indicated that more checkpoints may come.
continue
start = time.time()
checkpoint_path = new_checkpoint_path
yield checkpoint_path
time_to_next_eval = start + min_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
@tf_export(v1=["train.init_from_checkpoint"])
def init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""Replaces `tf.Variable` initializers so they load from a checkpoint file.
Values are not loaded immediately, but when the initializer is run
(typically by running a `tf.compat.v1.global_variables_initializer` op).
Note: This overrides default initialization ops of specified variables and
redefines dtype.
  Assignment map supports the following syntax:
* `'checkpoint_scope_name/': 'scope_name/'` - will load all variables in
current `scope_name` from `checkpoint_scope_name` with matching tensor
names.
* `'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` -
will initialize `scope_name/variable_name` variable
from `checkpoint_scope_name/some_other_variable`.
* `'scope_variable_name': variable` - will initialize given `tf.Variable`
object with tensor 'scope_variable_name' from the checkpoint.
* `'scope_variable_name': list(variable)` - will initialize list of
partitioned variables with tensor 'scope_variable_name' from the checkpoint.
* `'/': 'scope_name/'` - will load all variables in current `scope_name` from
checkpoint's root (e.g. no scope).
Supports loading into partitioned variables, which are represented as
`'<variable>/part_<part #>'`.
Example:
```python
# Say, '/tmp/model.ckpt' has the following tensors:
# -- name='old_scope_1/var1', shape=[20, 2]
# -- name='old_scope_1/var2', shape=[50, 4]
# -- name='old_scope_2/var3', shape=[100, 100]
# Create new model's variables
with tf.compat.v1.variable_scope('new_scope_1'):
var1 = tf.compat.v1.get_variable('var1', shape=[20, 2],
initializer=tf.compat.v1.zeros_initializer())
with tf.compat.v1.variable_scope('new_scope_2'):
var2 = tf.compat.v1.get_variable('var2', shape=[50, 4],
initializer=tf.compat.v1.zeros_initializer())
# Partition into 5 variables along the first axis.
var3 = tf.compat.v1.get_variable(name='var3', shape=[100, 100],
initializer=tf.compat.v1.zeros_initializer(),
partitioner=lambda shape, dtype: [5, 1])
# Initialize all variables in `new_scope_1` from `old_scope_1`.
  init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/': 'new_scope_1/'})
# Use names to specify which variables to initialize from checkpoint.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': 'new_scope_1/var1',
'old_scope_1/var2': 'new_scope_2/var2'})
# Or use tf.Variable objects to identify what to initialize.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': var1,
'old_scope_1/var2': var2})
# Initialize partitioned variables using variable's name
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': 'new_scope_2/var3'})
# Or specify the list of tf.Variable objects.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': var3._get_variable_list()})
```
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
assignment_map: Dict, where keys are names of the variables in the
checkpoint and values are current variables or names of current variables
(in default graph).
Raises:
    ValueError: If variables are missing from the current graph, or if
      checkpoints or tensors are missing from the checkpoint.
"""
init_from_checkpoint_fn = lambda _: _init_from_checkpoint(
ckpt_dir_or_file, assignment_map)
if distribution_strategy_context.get_cross_replica_context():
init_from_checkpoint_fn(None)
else:
distribution_strategy_context.get_replica_context().merge_call(
init_from_checkpoint_fn)
def _init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""See `init_from_checkpoint` for documentation."""
ckpt_file = _get_checkpoint_filename(ckpt_dir_or_file)
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
for tensor_name_in_ckpt, current_var_or_name in sorted(
six.iteritems(assignment_map)):
var = None
# Check if this is Variable object or list of Variable objects (in case of
# partitioned variables).
if _is_variable(current_var_or_name) or (
isinstance(current_var_or_name, list)
and all(_is_variable(v) for v in current_var_or_name)):
var = current_var_or_name
else:
store_vars = vs._get_default_variable_store()._vars # pylint:disable=protected-access
# Check if this variable is in var_store.
var = store_vars.get(current_var_or_name, None)
# Also check if variable is partitioned as list.
if var is None:
var = _collect_partitioned_variable(current_var_or_name, store_vars)
if var is not None:
      # If a one-to-one mapping was provided, find the variable in the checkpoint.
if tensor_name_in_ckpt not in variable_map:
raise ValueError("Tensor %s is not found in %s checkpoint %s" % (
tensor_name_in_ckpt, ckpt_dir_or_file, variable_map
))
if _is_variable(var):
# Additional at-call-time checks.
if not var.get_shape().is_compatible_with(
variable_map[tensor_name_in_ckpt]):
raise ValueError(
"Shape of variable %s (%s) doesn't match with shape of "
"tensor %s (%s) from checkpoint reader." % (
var.name, str(var.get_shape()),
tensor_name_in_ckpt, str(variable_map[tensor_name_in_ckpt])
))
var_name = var.name
else:
var_name = ",".join([v.name for v in var])
_set_variable_or_list_initializer(var, ckpt_file, tensor_name_in_ckpt)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, tensor_name_in_ckpt)
else:
scopes = ""
# TODO(vihanjain): Support list of 'current_var_or_name' here.
if "/" in current_var_or_name:
scopes = current_var_or_name[:current_var_or_name.rindex("/")]
if not tensor_name_in_ckpt.endswith("/"):
raise ValueError(
"Assignment map with scope only name {} should map to scope only "
"{}. Should be 'scope/': 'other_scope/'.".format(
scopes, tensor_name_in_ckpt))
# If scope to scope mapping was provided, find all variables in the scope
# and create variable to variable mapping.
scope_variables = set()
for var_name in store_vars:
if not scopes or var_name.startswith(scopes + "/"):
# Consume /part_ if partitioned variable.
if "/part_" in var_name:
var_name = var_name[:var_name.index("/part_")]
scope_variables.add(var_name)
for var_name in sorted(scope_variables):
# Lookup name with specified prefix and suffix from current variable.
# If tensor_name given is '/' (root), don't use it for full name.
full_tensor_name = var_name[len(scopes):]
if current_var_or_name != "/":
full_tensor_name = full_tensor_name[1:]
if tensor_name_in_ckpt != "/":
full_tensor_name = tensor_name_in_ckpt + full_tensor_name
# Remove trailing '/', if any, in the full_tensor_name
if full_tensor_name.endswith("/"):
full_tensor_name = full_tensor_name[:-1]
if full_tensor_name not in variable_map:
raise ValueError(
"Tensor %s (%s in %s) is not found in %s checkpoint" % (
full_tensor_name, var_name[len(scopes) + 1:],
tensor_name_in_ckpt, ckpt_dir_or_file
))
var = store_vars.get(var_name, None)
if var is None:
var = _collect_partitioned_variable(var_name, store_vars)
_set_variable_or_list_initializer(var, ckpt_file, full_tensor_name)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, full_tensor_name)
def _get_checkpoint_filename(ckpt_dir_or_file):
"""Returns checkpoint filename given directory or specific checkpoint file."""
if gfile.IsDirectory(ckpt_dir_or_file):
return checkpoint_management.latest_checkpoint(ckpt_dir_or_file)
return ckpt_dir_or_file
def _set_checkpoint_initializer(variable,
ckpt_file,
tensor_name,
slice_spec,
name="checkpoint_initializer",
# +++ DIT: default write_version=saver_pb2.SaverDef.DIT
write_version=saver_pb2.SaverDef.DIT):
# --- DIT: default write_version=saver_pb2.SaverDef.DIT
"""Overrides given variable's initialization op.
Sets variable initializer to assign op that initializes variable from tensor's
value in the checkpoint.
Args:
variable: `tf.Variable` object.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
slice_spec: Slice specification for loading partitioned tensors.
    name: Name of the operation.
    write_version: Checkpoint format version from `saver_pb2.SaverDef`; V1 and
      V2 restore via `restore_v2`, DIT restores via the fork's `restore_dit`.
"""
base_type = variable.dtype.base_dtype
# Do not colocate with variable since RestoreV2 op only runs on CPU and
# colocation will force variable (and other ops that colocate with variable)
# to be on CPU as well. It is okay to place the variable's initializer op on
# CPU since it will only be run once at the start.
with ops.device(variable.device), ops.device("/cpu:0"):
#restore_op = io_ops.restore_v2(
# ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
# +++ DIT: check for restore_dit
    # Dispatch on the `write_version` argument; this is a module-level
    # function, so there is no `self` here.
    if write_version == saver_pb2.SaverDef.V1 or write_version == saver_pb2.SaverDef.V2:
      restore_op = io_ops.restore_v2(ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
    elif write_version == saver_pb2.SaverDef.DIT:
      restore_op = io_ops.restore_dit(ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
    else:
      raise RuntimeError("Unexpected write_version: %s" % write_version)
# --- DIT: check for restore_dit
names_to_saveables = saveable_object_util.op_list_to_dict([variable])
saveable_objects = []
for name, op in names_to_saveables.items():
for s in saveable_object_util.saveable_objects_for_op(op, name):
saveable_objects.append(s)
assert len(saveable_objects) == 1 # Should be only one variable.
init_op = saveable_objects[0].restore([restore_op], restored_shapes=None)
# pylint:disable=protected-access
variable._initializer_op = init_op
restore_op.set_shape(variable.shape)
variable._initial_value = restore_op
# pylint:enable=protected-access
def _set_variable_or_list_initializer(variable_or_list, ckpt_file,
tensor_name):
"""Overrides initialization op of given variable or list of variables.
Calls `_set_checkpoint_initializer` for each variable in the given list of
variables.
Args:
variable_or_list: `tf.Variable` object or a list of `tf.Variable` objects.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
Raises:
ValueError: if all objects in `variable_or_list` are not partitions of the
same large variable.
"""
if isinstance(variable_or_list, (list, tuple)):
# A set of slices.
slice_name = None
for v in variable_or_list:
slice_info = v._save_slice_info # pylint:disable=protected-access
if slice_name is None:
slice_name = slice_info.full_name
elif slice_name != slice_info.full_name:
raise ValueError("Slices must all be from the same tensor: %s != %s" %
(slice_name, slice_info.full_name))
_set_checkpoint_initializer(v, ckpt_file, tensor_name, slice_info.spec)
else:
_set_checkpoint_initializer(variable_or_list, ckpt_file, tensor_name, "")
def _is_variable(x):
return (isinstance(x, variables.Variable) or
resource_variable_ops.is_resource_variable(x))
def _collect_partitioned_variable(name, all_vars):
"""Returns list of `tf.Variable` that comprise the partitioned variable."""
if name + "/part_0" in all_vars:
var = []
i = 0
while name + "/part_%d" % i in all_vars:
var.append(all_vars[name + "/part_%d" % i])
i += 1
return var
return None
|
@tf_export("train.load_variable")
def load_variable(ckpt_dir_or_file, name):
"""Returns the tensor value of the given variable in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
name: Name of the variable to return.
Returns:
A numpy `ndarray` with a copy of the value of this variable.
"""
# TODO(b/29227106): Fix this in the right place and remove this.
if name.endswith(":0"):
name = name[:-2]
reader = load_checkpoint(ckpt_dir_or_file)
return reader.get_tensor(name)
| 70
| 85
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to work with checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import six
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import py_checkpoint_reader
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"load_checkpoint", "load_variable", "list_variables",
"checkpoints_iterator", "init_from_checkpoint"
]
@tf_export("train.load_checkpoint")
def load_checkpoint(ckpt_dir_or_file):
"""Returns `CheckpointReader` for checkpoint found in `ckpt_dir_or_file`.
If `ckpt_dir_or_file` resolves to a directory with multiple checkpoints,
  a reader for the latest checkpoint is returned.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint
file.
Returns:
`CheckpointReader` object.
Raises:
ValueError: If `ckpt_dir_or_file` resolves to a directory with no
checkpoints.
"""
filename = _get_checkpoint_filename(ckpt_dir_or_file)
if filename is None:
raise ValueError("Couldn't find 'checkpoint' file or checkpoints in "
"given directory %s" % ckpt_dir_or_file)
return py_checkpoint_reader.NewCheckpointReader(filename)
@tf_export("train.load_variable")
def load_variable(ckpt_dir_or_file, name):
"""Returns the tensor value of the given variable in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
name: Name of the variable to return.
Returns:
A numpy `ndarray` with a copy of the value of this variable.
"""
# TODO(b/29227106): Fix this in the right place and remove this.
if name.endswith(":0"):
name = name[:-2]
reader = load_checkpoint(ckpt_dir_or_file)
return reader.get_tensor(name)
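# Editor's sketch (not part of the original source): reading one tensor value;
# the path and tensor name are hypothetical.
def _example_load_variable():
  # A trailing ':0' op-output suffix is stripped by `load_variable` (above).
  value = load_variable('/tmp/model.ckpt', 'new_scope_1/var1:0')
  print('loaded ndarray with shape', value.shape)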
@tf_export("train.list_variables")
def list_variables(ckpt_dir_or_file):
"""Returns list of all variables in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
Returns:
List of tuples `(name, shape)`.
"""
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
names = sorted(variable_map.keys())
result = []
for name in names:
result.append((name, variable_map[name]))
return result
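# Editor's sketch (not part of the original source): enumerating checkpoint
# contents; the path is hypothetical.
def _example_list_variables():
  for name, shape in list_variables('/tmp/model.ckpt'):
    print('%s has shape %s' % (name, shape))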
def wait_for_new_checkpoint(checkpoint_dir,
last_checkpoint=None,
seconds_to_sleep=1,
timeout=None):
"""Waits until a new checkpoint file is found.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
last_checkpoint: The last checkpoint path used or `None` if we're expecting
a checkpoint for the first time.
seconds_to_sleep: The number of seconds to sleep for before looking for a
new checkpoint.
timeout: The maximum number of seconds to wait. If left as `None`, then the
process will wait indefinitely.
Returns:
a new checkpoint path, or None if the timeout was reached.
"""
logging.info("Waiting for new checkpoint at %s", checkpoint_dir)
stop_time = time.time() + timeout if timeout is not None else None
while True:
checkpoint_path = checkpoint_management.latest_checkpoint(checkpoint_dir)
if checkpoint_path is None or checkpoint_path == last_checkpoint:
if stop_time is not None and time.time() + seconds_to_sleep > stop_time:
return None
time.sleep(seconds_to_sleep)
else:
logging.info("Found new checkpoint at %s", checkpoint_path)
return checkpoint_path
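# Editor's sketch (not part of the original source): driving the polling loop
# above; '/tmp/train_dir' and the 60-second timeout are hypothetical.
def _example_wait_for_new_checkpoint():
  path = wait_for_new_checkpoint('/tmp/train_dir', last_checkpoint=None,
                                 timeout=60)
  if path is None:
    print('no new checkpoint appeared within 60 seconds')
  else:
    print('found checkpoint', path)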
@tf_export("train.checkpoints_iterator")
def checkpoints_iterator(checkpoint_dir,
min_interval_secs=0,
timeout=None,
timeout_fn=None):
"""Continuously yield new checkpoint files as they appear.
The iterator only checks for new checkpoints when control flow has been
reverted to it. This means it can miss checkpoints if your code takes longer
to run between iterations than `min_interval_secs` or the interval at which
new checkpoints are written.
The `timeout` argument is the maximum number of seconds to block waiting for
a new checkpoint. It is used in combination with the `timeout_fn` as
follows:
* If the timeout expires and no `timeout_fn` was specified, the iterator
stops yielding.
* If a `timeout_fn` was specified, that function is called and if it returns
a true boolean value the iterator stops yielding.
* If the function returns a false boolean value then the iterator resumes the
wait for new checkpoints. At this point the timeout logic applies again.
This behavior gives control to callers on what to do if checkpoints do not
come fast enough or stop being generated. For example, if callers have a way
to detect that the training has stopped and know that no new checkpoints
will be generated, they can provide a `timeout_fn` that returns `True` when
the training has stopped. If they know that the training is still going on
they return `False` instead.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
min_interval_secs: The minimum number of seconds between yielding
checkpoints.
timeout: The maximum number of seconds to wait between checkpoints. If left
as `None`, then the process will wait indefinitely.
timeout_fn: Optional function to call after a timeout. If the function
returns True, then it means that no new checkpoints will be generated and
the iterator will exit. The function is called with no arguments.
Yields:
String paths to latest checkpoint files as they arrive.
"""
checkpoint_path = None
while True:
new_checkpoint_path = wait_for_new_checkpoint(
checkpoint_dir, checkpoint_path, timeout=timeout)
if new_checkpoint_path is None:
if not timeout_fn:
# timed out
logging.info("Timed-out waiting for a checkpoint.")
return
if timeout_fn():
# The timeout_fn indicated that we are truly done.
return
else:
# The timeout_fn indicated that more checkpoints may come.
continue
start = time.time()
checkpoint_path = new_checkpoint_path
yield checkpoint_path
time_to_next_eval = start + min_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
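# Editor's sketch (not part of the original source): an evaluation-style loop
# over checkpoints; the directory, intervals, and stop rule are hypothetical.
def _example_checkpoints_iterator():
  for ckpt_path in checkpoints_iterator('/tmp/train_dir',
                                        min_interval_secs=30,
                                        timeout=600,
                                        timeout_fn=lambda: True):
    # One evaluation pass per checkpoint; `timeout_fn=lambda: True` makes the
    # iterator stop after the first 600-second wait with no new checkpoint.
    print('evaluating', ckpt_path)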
@tf_export(v1=["train.init_from_checkpoint"])
def init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""Replaces `tf.Variable` initializers so they load from a checkpoint file.
Values are not loaded immediately, but when the initializer is run
(typically by running a `tf.compat.v1.global_variables_initializer` op).
Note: This overrides default initialization ops of specified variables and
redefines dtype.
  Assignment map supports the following syntax:
* `'checkpoint_scope_name/': 'scope_name/'` - will load all variables in
current `scope_name` from `checkpoint_scope_name` with matching tensor
names.
* `'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` -
will initialize `scope_name/variable_name` variable
from `checkpoint_scope_name/some_other_variable`.
* `'scope_variable_name': variable` - will initialize given `tf.Variable`
object with tensor 'scope_variable_name' from the checkpoint.
* `'scope_variable_name': list(variable)` - will initialize list of
partitioned variables with tensor 'scope_variable_name' from the checkpoint.
* `'/': 'scope_name/'` - will load all variables in current `scope_name` from
checkpoint's root (e.g. no scope).
Supports loading into partitioned variables, which are represented as
`'<variable>/part_<part #>'`.
Example:
```python
# Say, '/tmp/model.ckpt' has the following tensors:
# -- name='old_scope_1/var1', shape=[20, 2]
# -- name='old_scope_1/var2', shape=[50, 4]
# -- name='old_scope_2/var3', shape=[100, 100]
# Create new model's variables
with tf.compat.v1.variable_scope('new_scope_1'):
var1 = tf.compat.v1.get_variable('var1', shape=[20, 2],
initializer=tf.compat.v1.zeros_initializer())
with tf.compat.v1.variable_scope('new_scope_2'):
var2 = tf.compat.v1.get_variable('var2', shape=[50, 4],
initializer=tf.compat.v1.zeros_initializer())
# Partition into 5 variables along the first axis.
var3 = tf.compat.v1.get_variable(name='var3', shape=[100, 100],
initializer=tf.compat.v1.zeros_initializer(),
partitioner=lambda shape, dtype: [5, 1])
# Initialize all variables in `new_scope_1` from `old_scope_1`.
  init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/': 'new_scope_1/'})
# Use names to specify which variables to initialize from checkpoint.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': 'new_scope_1/var1',
'old_scope_1/var2': 'new_scope_2/var2'})
# Or use tf.Variable objects to identify what to initialize.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': var1,
'old_scope_1/var2': var2})
# Initialize partitioned variables using variable's name
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': 'new_scope_2/var3'})
# Or specify the list of tf.Variable objects.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': var3._get_variable_list()})
```
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
assignment_map: Dict, where keys are names of the variables in the
checkpoint and values are current variables or names of current variables
(in default graph).
Raises:
    ValueError: If variables are missing from the current graph, or if
      checkpoints or tensors are missing from the checkpoint.
"""
init_from_checkpoint_fn = lambda _: _init_from_checkpoint(
ckpt_dir_or_file, assignment_map)
if distribution_strategy_context.get_cross_replica_context():
init_from_checkpoint_fn(None)
else:
distribution_strategy_context.get_replica_context().merge_call(
init_from_checkpoint_fn)
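# Editor's sketch (not part of the original source): a warm-start flow that
# mirrors the docstring example above; the checkpoint path is hypothetical.
def _example_init_from_checkpoint():
  import tensorflow.compat.v1 as tf  # assumed available in this environment
  with tf.variable_scope('new_scope_1'):
    var1 = tf.get_variable('var1', shape=[20, 2])
  init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/var1': var1})
  # The checkpoint value is loaded when the initializers actually run:
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(var1))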
def _init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""See `init_from_checkpoint` for documentation."""
ckpt_file = _get_checkpoint_filename(ckpt_dir_or_file)
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
for tensor_name_in_ckpt, current_var_or_name in sorted(
six.iteritems(assignment_map)):
var = None
# Check if this is Variable object or list of Variable objects (in case of
# partitioned variables).
if _is_variable(current_var_or_name) or (
isinstance(current_var_or_name, list)
and all(_is_variable(v) for v in current_var_or_name)):
var = current_var_or_name
else:
store_vars = vs._get_default_variable_store()._vars # pylint:disable=protected-access
# Check if this variable is in var_store.
var = store_vars.get(current_var_or_name, None)
# Also check if variable is partitioned as list.
if var is None:
var = _collect_partitioned_variable(current_var_or_name, store_vars)
if var is not None:
      # If a one-to-one mapping was provided, find the variable in the checkpoint.
if tensor_name_in_ckpt not in variable_map:
raise ValueError("Tensor %s is not found in %s checkpoint %s" % (
tensor_name_in_ckpt, ckpt_dir_or_file, variable_map
))
if _is_variable(var):
# Additional at-call-time checks.
if not var.get_shape().is_compatible_with(
variable_map[tensor_name_in_ckpt]):
raise ValueError(
"Shape of variable %s (%s) doesn't match with shape of "
"tensor %s (%s) from checkpoint reader." % (
var.name, str(var.get_shape()),
tensor_name_in_ckpt, str(variable_map[tensor_name_in_ckpt])
))
var_name = var.name
else:
var_name = ",".join([v.name for v in var])
_set_variable_or_list_initializer(var, ckpt_file, tensor_name_in_ckpt)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, tensor_name_in_ckpt)
else:
scopes = ""
# TODO(vihanjain): Support list of 'current_var_or_name' here.
if "/" in current_var_or_name:
scopes = current_var_or_name[:current_var_or_name.rindex("/")]
if not tensor_name_in_ckpt.endswith("/"):
raise ValueError(
"Assignment map with scope only name {} should map to scope only "
"{}. Should be 'scope/': 'other_scope/'.".format(
scopes, tensor_name_in_ckpt))
# If scope to scope mapping was provided, find all variables in the scope
# and create variable to variable mapping.
scope_variables = set()
for var_name in store_vars:
if not scopes or var_name.startswith(scopes + "/"):
# Consume /part_ if partitioned variable.
if "/part_" in var_name:
var_name = var_name[:var_name.index("/part_")]
scope_variables.add(var_name)
for var_name in sorted(scope_variables):
# Lookup name with specified prefix and suffix from current variable.
# If tensor_name given is '/' (root), don't use it for full name.
full_tensor_name = var_name[len(scopes):]
if current_var_or_name != "/":
full_tensor_name = full_tensor_name[1:]
if tensor_name_in_ckpt != "/":
full_tensor_name = tensor_name_in_ckpt + full_tensor_name
# Remove trailing '/', if any, in the full_tensor_name
if full_tensor_name.endswith("/"):
full_tensor_name = full_tensor_name[:-1]
if full_tensor_name not in variable_map:
raise ValueError(
"Tensor %s (%s in %s) is not found in %s checkpoint" % (
full_tensor_name, var_name[len(scopes) + 1:],
tensor_name_in_ckpt, ckpt_dir_or_file
))
var = store_vars.get(var_name, None)
if var is None:
var = _collect_partitioned_variable(var_name, store_vars)
_set_variable_or_list_initializer(var, ckpt_file, full_tensor_name)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, full_tensor_name)
def _get_checkpoint_filename(ckpt_dir_or_file):
"""Returns checkpoint filename given directory or specific checkpoint file."""
if gfile.IsDirectory(ckpt_dir_or_file):
return checkpoint_management.latest_checkpoint(ckpt_dir_or_file)
return ckpt_dir_or_file
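# Editor's note (not part of the original source): the resolution above means,
# for hypothetical paths,
#   _get_checkpoint_filename('/tmp/train_dir')    -> latest prefix found there,
#                                                    e.g. '/tmp/train_dir/model.ckpt-1000'
#   _get_checkpoint_filename('/tmp/model.ckpt-5') -> '/tmp/model.ckpt-5', unchanged.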
def _set_checkpoint_initializer(variable,
ckpt_file,
tensor_name,
slice_spec,
name="checkpoint_initializer",
# +++ DIT: default write_version=saver_pb2.SaverDef.DIT
write_version=saver_pb2.SaverDef.DIT):
# --- DIT: default write_version=saver_pb2.SaverDef.DIT
"""Overrides given variable's initialization op.
Sets variable initializer to assign op that initializes variable from tensor's
value in the checkpoint.
Args:
variable: `tf.Variable` object.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
slice_spec: Slice specification for loading partitioned tensors.
    name: Name of the operation.
    write_version: Checkpoint format version from `saver_pb2.SaverDef`; V1 and
      V2 restore via `restore_v2`, DIT restores via the fork's `restore_dit`.
"""
base_type = variable.dtype.base_dtype
# Do not colocate with variable since RestoreV2 op only runs on CPU and
# colocation will force variable (and other ops that colocate with variable)
# to be on CPU as well. It is okay to place the variable's initializer op on
# CPU since it will only be run once at the start.
with ops.device(variable.device), ops.device("/cpu:0"):
#restore_op = io_ops.restore_v2(
# ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
# +++ DIT: check for restore_dit
    # Dispatch on the `write_version` argument; this is a module-level
    # function, so there is no `self` here.
    if write_version == saver_pb2.SaverDef.V1 or write_version == saver_pb2.SaverDef.V2:
      restore_op = io_ops.restore_v2(ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
    elif write_version == saver_pb2.SaverDef.DIT:
      restore_op = io_ops.restore_dit(ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
    else:
      raise RuntimeError("Unexpected write_version: %s" % write_version)
# --- DIT: check for restore_dit
names_to_saveables = saveable_object_util.op_list_to_dict([variable])
saveable_objects = []
for name, op in names_to_saveables.items():
for s in saveable_object_util.saveable_objects_for_op(op, name):
saveable_objects.append(s)
assert len(saveable_objects) == 1 # Should be only one variable.
init_op = saveable_objects[0].restore([restore_op], restored_shapes=None)
# pylint:disable=protected-access
variable._initializer_op = init_op
restore_op.set_shape(variable.shape)
variable._initial_value = restore_op
# pylint:enable=protected-access
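# Editor's note (not part of the original source): the DIT fork dispatches on
# `write_version` above; a caller needing stock TensorFlow behavior can pass
# the V2 format explicitly (internal API, sketch only):
#   _set_checkpoint_initializer(variable, ckpt_file, tensor_name, '',
#                               write_version=saver_pb2.SaverDef.V2)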
def _set_variable_or_list_initializer(variable_or_list, ckpt_file,
tensor_name):
"""Overrides initialization op of given variable or list of variables.
Calls `_set_checkpoint_initializer` for each variable in the given list of
variables.
Args:
variable_or_list: `tf.Variable` object or a list of `tf.Variable` objects.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
Raises:
ValueError: if all objects in `variable_or_list` are not partitions of the
same large variable.
"""
if isinstance(variable_or_list, (list, tuple)):
# A set of slices.
slice_name = None
for v in variable_or_list:
slice_info = v._save_slice_info # pylint:disable=protected-access
if slice_name is None:
slice_name = slice_info.full_name
elif slice_name != slice_info.full_name:
raise ValueError("Slices must all be from the same tensor: %s != %s" %
(slice_name, slice_info.full_name))
_set_checkpoint_initializer(v, ckpt_file, tensor_name, slice_info.spec)
else:
_set_checkpoint_initializer(variable_or_list, ckpt_file, tensor_name, "")
def _is_variable(x):
return (isinstance(x, variables.Variable) or
resource_variable_ops.is_resource_variable(x))
def _collect_partitioned_variable(name, all_vars):
"""Returns list of `tf.Variable` that comprise the partitioned variable."""
if name + "/part_0" in all_vars:
var = []
i = 0
while name + "/part_%d" % i in all_vars:
var.append(all_vars[name + "/part_%d" % i])
i += 1
return var
return None
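# Editor's sketch (not part of the original source): the '/part_<i>' naming
# convention handled above, with stand-in values instead of real variables.
def _example_collect_partitioned_variable():
  all_vars = {'var3/part_0': 'v0', 'var3/part_1': 'v1'}
  assert _collect_partitioned_variable('var3', all_vars) == ['v0', 'v1']
  assert _collect_partitioned_variable('missing', all_vars) is None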
|
list_variables
|
Returns list of all variables in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
Returns:
List of tuples `(name, shape)`.
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to work with checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import six
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import py_checkpoint_reader
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"load_checkpoint", "load_variable", "list_variables",
"checkpoints_iterator", "init_from_checkpoint"
]
@tf_export("train.load_checkpoint")
def load_checkpoint(ckpt_dir_or_file):
"""Returns `CheckpointReader` for checkpoint found in `ckpt_dir_or_file`.
If `ckpt_dir_or_file` resolves to a directory with multiple checkpoints,
  a reader for the latest checkpoint is returned.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint
file.
Returns:
`CheckpointReader` object.
Raises:
ValueError: If `ckpt_dir_or_file` resolves to a directory with no
checkpoints.
"""
filename = _get_checkpoint_filename(ckpt_dir_or_file)
if filename is None:
raise ValueError("Couldn't find 'checkpoint' file or checkpoints in "
"given directory %s" % ckpt_dir_or_file)
return py_checkpoint_reader.NewCheckpointReader(filename)
@tf_export("train.load_variable")
def load_variable(ckpt_dir_or_file, name):
"""Returns the tensor value of the given variable in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
name: Name of the variable to return.
Returns:
A numpy `ndarray` with a copy of the value of this variable.
"""
# TODO(b/29227106): Fix this in the right place and remove this.
if name.endswith(":0"):
name = name[:-2]
reader = load_checkpoint(ckpt_dir_or_file)
return reader.get_tensor(name)
# MASKED: list_variables function (lines 88-104)
def wait_for_new_checkpoint(checkpoint_dir,
last_checkpoint=None,
seconds_to_sleep=1,
timeout=None):
"""Waits until a new checkpoint file is found.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
last_checkpoint: The last checkpoint path used or `None` if we're expecting
a checkpoint for the first time.
seconds_to_sleep: The number of seconds to sleep for before looking for a
new checkpoint.
timeout: The maximum number of seconds to wait. If left as `None`, then the
process will wait indefinitely.
Returns:
a new checkpoint path, or None if the timeout was reached.
"""
logging.info("Waiting for new checkpoint at %s", checkpoint_dir)
stop_time = time.time() + timeout if timeout is not None else None
while True:
checkpoint_path = checkpoint_management.latest_checkpoint(checkpoint_dir)
if checkpoint_path is None or checkpoint_path == last_checkpoint:
if stop_time is not None and time.time() + seconds_to_sleep > stop_time:
return None
time.sleep(seconds_to_sleep)
else:
logging.info("Found new checkpoint at %s", checkpoint_path)
return checkpoint_path
@tf_export("train.checkpoints_iterator")
def checkpoints_iterator(checkpoint_dir,
min_interval_secs=0,
timeout=None,
timeout_fn=None):
"""Continuously yield new checkpoint files as they appear.
The iterator only checks for new checkpoints when control flow has been
reverted to it. This means it can miss checkpoints if your code takes longer
to run between iterations than `min_interval_secs` or the interval at which
new checkpoints are written.
The `timeout` argument is the maximum number of seconds to block waiting for
a new checkpoint. It is used in combination with the `timeout_fn` as
follows:
* If the timeout expires and no `timeout_fn` was specified, the iterator
stops yielding.
* If a `timeout_fn` was specified, that function is called and if it returns
a true boolean value the iterator stops yielding.
* If the function returns a false boolean value then the iterator resumes the
wait for new checkpoints. At this point the timeout logic applies again.
This behavior gives control to callers on what to do if checkpoints do not
come fast enough or stop being generated. For example, if callers have a way
to detect that the training has stopped and know that no new checkpoints
will be generated, they can provide a `timeout_fn` that returns `True` when
the training has stopped. If they know that the training is still going on
they return `False` instead.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
min_interval_secs: The minimum number of seconds between yielding
checkpoints.
timeout: The maximum number of seconds to wait between checkpoints. If left
as `None`, then the process will wait indefinitely.
timeout_fn: Optional function to call after a timeout. If the function
returns True, then it means that no new checkpoints will be generated and
the iterator will exit. The function is called with no arguments.
Yields:
String paths to latest checkpoint files as they arrive.
"""
checkpoint_path = None
while True:
new_checkpoint_path = wait_for_new_checkpoint(
checkpoint_dir, checkpoint_path, timeout=timeout)
if new_checkpoint_path is None:
if not timeout_fn:
# timed out
logging.info("Timed-out waiting for a checkpoint.")
return
if timeout_fn():
# The timeout_fn indicated that we are truly done.
return
else:
# The timeout_fn indicated that more checkpoints may come.
continue
start = time.time()
checkpoint_path = new_checkpoint_path
yield checkpoint_path
time_to_next_eval = start + min_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
@tf_export(v1=["train.init_from_checkpoint"])
def init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""Replaces `tf.Variable` initializers so they load from a checkpoint file.
Values are not loaded immediately, but when the initializer is run
(typically by running a `tf.compat.v1.global_variables_initializer` op).
Note: This overrides default initialization ops of specified variables and
redefines dtype.
  Assignment map supports the following syntax:
* `'checkpoint_scope_name/': 'scope_name/'` - will load all variables in
current `scope_name` from `checkpoint_scope_name` with matching tensor
names.
* `'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` -
will initialize `scope_name/variable_name` variable
from `checkpoint_scope_name/some_other_variable`.
* `'scope_variable_name': variable` - will initialize given `tf.Variable`
object with tensor 'scope_variable_name' from the checkpoint.
* `'scope_variable_name': list(variable)` - will initialize list of
partitioned variables with tensor 'scope_variable_name' from the checkpoint.
* `'/': 'scope_name/'` - will load all variables in current `scope_name` from
checkpoint's root (e.g. no scope).
Supports loading into partitioned variables, which are represented as
`'<variable>/part_<part #>'`.
Example:
```python
# Say, '/tmp/model.ckpt' has the following tensors:
# -- name='old_scope_1/var1', shape=[20, 2]
# -- name='old_scope_1/var2', shape=[50, 4]
# -- name='old_scope_2/var3', shape=[100, 100]
# Create new model's variables
with tf.compat.v1.variable_scope('new_scope_1'):
var1 = tf.compat.v1.get_variable('var1', shape=[20, 2],
initializer=tf.compat.v1.zeros_initializer())
with tf.compat.v1.variable_scope('new_scope_2'):
var2 = tf.compat.v1.get_variable('var2', shape=[50, 4],
initializer=tf.compat.v1.zeros_initializer())
# Partition into 5 variables along the first axis.
var3 = tf.compat.v1.get_variable(name='var3', shape=[100, 100],
initializer=tf.compat.v1.zeros_initializer(),
partitioner=lambda shape, dtype: [5, 1])
# Initialize all variables in `new_scope_1` from `old_scope_1`.
  init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/': 'new_scope_1/'})
# Use names to specify which variables to initialize from checkpoint.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': 'new_scope_1/var1',
'old_scope_1/var2': 'new_scope_2/var2'})
# Or use tf.Variable objects to identify what to initialize.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': var1,
'old_scope_1/var2': var2})
# Initialize partitioned variables using variable's name
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': 'new_scope_2/var3'})
# Or specify the list of tf.Variable objects.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': var3._get_variable_list()})
```
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
assignment_map: Dict, where keys are names of the variables in the
checkpoint and values are current variables or names of current variables
(in default graph).
Raises:
    ValueError: If variables are missing from the current graph, or if
      checkpoints or tensors are missing from the checkpoint.
"""
init_from_checkpoint_fn = lambda _: _init_from_checkpoint(
ckpt_dir_or_file, assignment_map)
if distribution_strategy_context.get_cross_replica_context():
init_from_checkpoint_fn(None)
else:
distribution_strategy_context.get_replica_context().merge_call(
init_from_checkpoint_fn)
def _init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""See `init_from_checkpoint` for documentation."""
ckpt_file = _get_checkpoint_filename(ckpt_dir_or_file)
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
for tensor_name_in_ckpt, current_var_or_name in sorted(
six.iteritems(assignment_map)):
var = None
# Check if this is Variable object or list of Variable objects (in case of
# partitioned variables).
if _is_variable(current_var_or_name) or (
isinstance(current_var_or_name, list)
and all(_is_variable(v) for v in current_var_or_name)):
var = current_var_or_name
else:
store_vars = vs._get_default_variable_store()._vars # pylint:disable=protected-access
# Check if this variable is in var_store.
var = store_vars.get(current_var_or_name, None)
# Also check if variable is partitioned as list.
if var is None:
var = _collect_partitioned_variable(current_var_or_name, store_vars)
if var is not None:
      # If a one-to-one mapping was provided, find the variable in the checkpoint.
if tensor_name_in_ckpt not in variable_map:
raise ValueError("Tensor %s is not found in %s checkpoint %s" % (
tensor_name_in_ckpt, ckpt_dir_or_file, variable_map
))
if _is_variable(var):
# Additional at-call-time checks.
if not var.get_shape().is_compatible_with(
variable_map[tensor_name_in_ckpt]):
raise ValueError(
"Shape of variable %s (%s) doesn't match with shape of "
"tensor %s (%s) from checkpoint reader." % (
var.name, str(var.get_shape()),
tensor_name_in_ckpt, str(variable_map[tensor_name_in_ckpt])
))
var_name = var.name
else:
var_name = ",".join([v.name for v in var])
_set_variable_or_list_initializer(var, ckpt_file, tensor_name_in_ckpt)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, tensor_name_in_ckpt)
else:
scopes = ""
# TODO(vihanjain): Support list of 'current_var_or_name' here.
if "/" in current_var_or_name:
scopes = current_var_or_name[:current_var_or_name.rindex("/")]
if not tensor_name_in_ckpt.endswith("/"):
raise ValueError(
"Assignment map with scope only name {} should map to scope only "
"{}. Should be 'scope/': 'other_scope/'.".format(
scopes, tensor_name_in_ckpt))
# If scope to scope mapping was provided, find all variables in the scope
# and create variable to variable mapping.
scope_variables = set()
for var_name in store_vars:
if not scopes or var_name.startswith(scopes + "/"):
# Consume /part_ if partitioned variable.
if "/part_" in var_name:
var_name = var_name[:var_name.index("/part_")]
scope_variables.add(var_name)
for var_name in sorted(scope_variables):
# Lookup name with specified prefix and suffix from current variable.
# If tensor_name given is '/' (root), don't use it for full name.
full_tensor_name = var_name[len(scopes):]
if current_var_or_name != "/":
full_tensor_name = full_tensor_name[1:]
if tensor_name_in_ckpt != "/":
full_tensor_name = tensor_name_in_ckpt + full_tensor_name
# Remove trailing '/', if any, in the full_tensor_name
if full_tensor_name.endswith("/"):
full_tensor_name = full_tensor_name[:-1]
if full_tensor_name not in variable_map:
raise ValueError(
"Tensor %s (%s in %s) is not found in %s checkpoint" % (
full_tensor_name, var_name[len(scopes) + 1:],
tensor_name_in_ckpt, ckpt_dir_or_file
))
var = store_vars.get(var_name, None)
if var is None:
var = _collect_partitioned_variable(var_name, store_vars)
_set_variable_or_list_initializer(var, ckpt_file, full_tensor_name)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, full_tensor_name)
def _get_checkpoint_filename(ckpt_dir_or_file):
"""Returns checkpoint filename given directory or specific checkpoint file."""
if gfile.IsDirectory(ckpt_dir_or_file):
return checkpoint_management.latest_checkpoint(ckpt_dir_or_file)
return ckpt_dir_or_file
def _set_checkpoint_initializer(variable,
ckpt_file,
tensor_name,
slice_spec,
name="checkpoint_initializer",
# +++ DIT: default write_version=saver_pb2.SaverDef.DIT
write_version=saver_pb2.SaverDef.DIT):
# --- DIT: default write_version=saver_pb2.SaverDef.DIT
"""Overrides given variable's initialization op.
Sets variable initializer to assign op that initializes variable from tensor's
value in the checkpoint.
Args:
variable: `tf.Variable` object.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
slice_spec: Slice specification for loading partitioned tensors.
    name: Name of the operation.
    write_version: Checkpoint format version from `saver_pb2.SaverDef`; V1 and
      V2 restore via `restore_v2`, DIT restores via the fork's `restore_dit`.
"""
base_type = variable.dtype.base_dtype
# Do not colocate with variable since RestoreV2 op only runs on CPU and
# colocation will force variable (and other ops that colocate with variable)
# to be on CPU as well. It is okay to place the variable's initializer op on
# CPU since it will only be run once at the start.
with ops.device(variable.device), ops.device("/cpu:0"):
#restore_op = io_ops.restore_v2(
# ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
# +++ DIT: check for restore_dit
    # Dispatch on the `write_version` argument; this is a module-level
    # function, so there is no `self` here.
    if write_version == saver_pb2.SaverDef.V1 or write_version == saver_pb2.SaverDef.V2:
      restore_op = io_ops.restore_v2(ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
    elif write_version == saver_pb2.SaverDef.DIT:
      restore_op = io_ops.restore_dit(ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
    else:
      raise RuntimeError("Unexpected write_version: %s" % write_version)
# --- DIT: check for restore_dit
names_to_saveables = saveable_object_util.op_list_to_dict([variable])
saveable_objects = []
for name, op in names_to_saveables.items():
for s in saveable_object_util.saveable_objects_for_op(op, name):
saveable_objects.append(s)
assert len(saveable_objects) == 1 # Should be only one variable.
init_op = saveable_objects[0].restore([restore_op], restored_shapes=None)
# pylint:disable=protected-access
variable._initializer_op = init_op
restore_op.set_shape(variable.shape)
variable._initial_value = restore_op
# pylint:enable=protected-access
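# A minimal sketch (hypothetical call, not part of the upstream API surface)
# of overriding one variable's initializer with an explicit format version:
#   _set_checkpoint_initializer(var1, "/tmp/model.ckpt", "old_scope_1/var1",
#                               "", write_version=saver_pb2.SaverDef.V2)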
def _set_variable_or_list_initializer(variable_or_list, ckpt_file,
tensor_name):
"""Overrides initialization op of given variable or list of variables.
Calls `_set_checkpoint_initializer` for each variable in the given list of
variables.
Args:
variable_or_list: `tf.Variable` object or a list of `tf.Variable` objects.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
Raises:
ValueError: if all objects in `variable_or_list` are not partitions of the
same large variable.
"""
if isinstance(variable_or_list, (list, tuple)):
# A set of slices.
slice_name = None
for v in variable_or_list:
slice_info = v._save_slice_info # pylint:disable=protected-access
if slice_name is None:
slice_name = slice_info.full_name
elif slice_name != slice_info.full_name:
raise ValueError("Slices must all be from the same tensor: %s != %s" %
(slice_name, slice_info.full_name))
_set_checkpoint_initializer(v, ckpt_file, tensor_name, slice_info.spec)
else:
_set_checkpoint_initializer(variable_or_list, ckpt_file, tensor_name, "")
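# Sketch of the list form: for slices v0, v1 of one partitioned tensor, each
# slice is restored with its own `slice_spec` (names here are hypothetical):
#   _set_variable_or_list_initializer([v0, v1], "/tmp/model.ckpt",
#                                     "old_scope_2/var3")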
def _is_variable(x):
return (isinstance(x, variables.Variable) or
resource_variable_ops.is_resource_variable(x))
def _collect_partitioned_variable(name, all_vars):
"""Returns list of `tf.Variable` that comprise the partitioned variable."""
if name + "/part_0" in all_vars:
var = []
i = 0
while name + "/part_%d" % i in all_vars:
var.append(all_vars[name + "/part_%d" % i])
i += 1
return var
return None
|
@tf_export("train.list_variables")
def list_variables(ckpt_dir_or_file):
"""Returns list of all variables in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
Returns:
List of tuples `(name, shape)`.
"""
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
names = sorted(variable_map.keys())
result = []
for name in names:
result.append((name, variable_map[name]))
return result
| 88
| 104
|
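A minimal usage sketch for the checkpoint inspection helpers defined above;
the checkpoint path is hypothetical, and only the public `tf.train` exports
shown in this file are used:
```python
import tensorflow as tf

ckpt = "/tmp/model.ckpt"  # hypothetical checkpoint prefix
# List (name, shape) pairs without loading any tensor values.
for name, shape in tf.train.list_variables(ckpt):
  print(name, shape)
# Load a single tensor's value as a numpy ndarray.
weights = tf.train.load_variable(ckpt, "old_scope_1/var1")
```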
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to work with checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import six
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import py_checkpoint_reader
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"load_checkpoint", "load_variable", "list_variables",
"checkpoints_iterator", "init_from_checkpoint"
]
@tf_export("train.load_checkpoint")
def load_checkpoint(ckpt_dir_or_file):
"""Returns `CheckpointReader` for checkpoint found in `ckpt_dir_or_file`.
If `ckpt_dir_or_file` resolves to a directory with multiple checkpoints,
reader for the latest checkpoint is returned.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint
file.
Returns:
`CheckpointReader` object.
Raises:
ValueError: If `ckpt_dir_or_file` resolves to a directory with no
checkpoints.
"""
filename = _get_checkpoint_filename(ckpt_dir_or_file)
if filename is None:
raise ValueError("Couldn't find 'checkpoint' file or checkpoints in "
"given directory %s" % ckpt_dir_or_file)
return py_checkpoint_reader.NewCheckpointReader(filename)
@tf_export("train.load_variable")
def load_variable(ckpt_dir_or_file, name):
"""Returns the tensor value of the given variable in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
name: Name of the variable to return.
Returns:
A numpy `ndarray` with a copy of the value of this variable.
"""
# TODO(b/29227106): Fix this in the right place and remove this.
if name.endswith(":0"):
name = name[:-2]
reader = load_checkpoint(ckpt_dir_or_file)
return reader.get_tensor(name)
@tf_export("train.list_variables")
def list_variables(ckpt_dir_or_file):
"""Returns list of all variables in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
Returns:
List of tuples `(name, shape)`.
"""
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
names = sorted(variable_map.keys())
result = []
for name in names:
result.append((name, variable_map[name]))
return result
def wait_for_new_checkpoint(checkpoint_dir,
last_checkpoint=None,
seconds_to_sleep=1,
timeout=None):
"""Waits until a new checkpoint file is found.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
last_checkpoint: The last checkpoint path used or `None` if we're expecting
a checkpoint for the first time.
seconds_to_sleep: The number of seconds to sleep for before looking for a
new checkpoint.
timeout: The maximum number of seconds to wait. If left as `None`, then the
process will wait indefinitely.
Returns:
a new checkpoint path, or None if the timeout was reached.
"""
logging.info("Waiting for new checkpoint at %s", checkpoint_dir)
stop_time = time.time() + timeout if timeout is not None else None
while True:
checkpoint_path = checkpoint_management.latest_checkpoint(checkpoint_dir)
if checkpoint_path is None or checkpoint_path == last_checkpoint:
if stop_time is not None and time.time() + seconds_to_sleep > stop_time:
return None
time.sleep(seconds_to_sleep)
else:
logging.info("Found new checkpoint at %s", checkpoint_path)
return checkpoint_path
@tf_export("train.checkpoints_iterator")
def checkpoints_iterator(checkpoint_dir,
min_interval_secs=0,
timeout=None,
timeout_fn=None):
"""Continuously yield new checkpoint files as they appear.
The iterator only checks for new checkpoints when control flow has been
reverted to it. This means it can miss checkpoints if your code takes longer
to run between iterations than `min_interval_secs` or the interval at which
new checkpoints are written.
The `timeout` argument is the maximum number of seconds to block waiting for
a new checkpoint. It is used in combination with the `timeout_fn` as
follows:
* If the timeout expires and no `timeout_fn` was specified, the iterator
stops yielding.
* If a `timeout_fn` was specified, that function is called and if it returns
a true boolean value the iterator stops yielding.
* If the function returns a false boolean value then the iterator resumes the
wait for new checkpoints. At this point the timeout logic applies again.
This behavior gives control to callers on what to do if checkpoints do not
come fast enough or stop being generated. For example, if callers have a way
to detect that the training has stopped and know that no new checkpoints
will be generated, they can provide a `timeout_fn` that returns `True` when
the training has stopped. If they know that the training is still going on
they return `False` instead.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
min_interval_secs: The minimum number of seconds between yielding
checkpoints.
timeout: The maximum number of seconds to wait between checkpoints. If left
as `None`, then the process will wait indefinitely.
timeout_fn: Optional function to call after a timeout. If the function
returns True, then it means that no new checkpoints will be generated and
the iterator will exit. The function is called with no arguments.
Yields:
String paths to latest checkpoint files as they arrive.
"""
checkpoint_path = None
while True:
new_checkpoint_path = wait_for_new_checkpoint(
checkpoint_dir, checkpoint_path, timeout=timeout)
if new_checkpoint_path is None:
if not timeout_fn:
# timed out
logging.info("Timed-out waiting for a checkpoint.")
return
if timeout_fn():
# The timeout_fn indicated that we are truly done.
return
else:
# The timeout_fn indicated that more checkpoints may come.
continue
start = time.time()
checkpoint_path = new_checkpoint_path
yield checkpoint_path
time_to_next_eval = start + min_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
@tf_export(v1=["train.init_from_checkpoint"])
def init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""Replaces `tf.Variable` initializers so they load from a checkpoint file.
Values are not loaded immediately, but when the initializer is run
(typically by running a `tf.compat.v1.global_variables_initializer` op).
Note: This overrides default initialization ops of specified variables and
redefines dtype.
Assignment map supports following syntax:
* `'checkpoint_scope_name/': 'scope_name/'` - will load all variables in
current `scope_name` from `checkpoint_scope_name` with matching tensor
names.
* `'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` -
will initialize `scope_name/variable_name` variable
from `checkpoint_scope_name/some_other_variable`.
* `'scope_variable_name': variable` - will initialize given `tf.Variable`
object with tensor 'scope_variable_name' from the checkpoint.
* `'scope_variable_name': list(variable)` - will initialize list of
partitioned variables with tensor 'scope_variable_name' from the checkpoint.
* `'/': 'scope_name/'` - will load all variables in current `scope_name` from
checkpoint's root (e.g. no scope).
Supports loading into partitioned variables, which are represented as
`'<variable>/part_<part #>'`.
Example:
```python
# Say, '/tmp/model.ckpt' has the following tensors:
# -- name='old_scope_1/var1', shape=[20, 2]
# -- name='old_scope_1/var2', shape=[50, 4]
# -- name='old_scope_2/var3', shape=[100, 100]
# Create new model's variables
with tf.compat.v1.variable_scope('new_scope_1'):
var1 = tf.compat.v1.get_variable('var1', shape=[20, 2],
initializer=tf.compat.v1.zeros_initializer())
with tf.compat.v1.variable_scope('new_scope_2'):
var2 = tf.compat.v1.get_variable('var2', shape=[50, 4],
initializer=tf.compat.v1.zeros_initializer())
# Partition into 5 variables along the first axis.
var3 = tf.compat.v1.get_variable(name='var3', shape=[100, 100],
initializer=tf.compat.v1.zeros_initializer(),
partitioner=lambda shape, dtype: [5, 1])
# Initialize all variables in `new_scope_1` from `old_scope_1`.
init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/': 'new_scope_1'})
# Use names to specify which variables to initialize from checkpoint.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': 'new_scope_1/var1',
'old_scope_1/var2': 'new_scope_2/var2'})
# Or use tf.Variable objects to identify what to initialize.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': var1,
'old_scope_1/var2': var2})
# Initialize partitioned variables using variable's name
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': 'new_scope_2/var3'})
# Or specify the list of tf.Variable objects.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': var3._get_variable_list()})
```
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
assignment_map: Dict, where keys are names of the variables in the
checkpoint and values are current variables or names of current variables
(in default graph).
Raises:
ValueError: If missing variables in current graph, or if missing
checkpoints or tensors in checkpoints.
"""
init_from_checkpoint_fn = lambda _: _init_from_checkpoint(
ckpt_dir_or_file, assignment_map)
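  # In cross-replica context the init can run directly; inside a replica
  # context we merge first so the restore is issued once for all replicas.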
if distribution_strategy_context.get_cross_replica_context():
init_from_checkpoint_fn(None)
else:
distribution_strategy_context.get_replica_context().merge_call(
init_from_checkpoint_fn)
def _init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""See `init_from_checkpoint` for documentation."""
ckpt_file = _get_checkpoint_filename(ckpt_dir_or_file)
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
for tensor_name_in_ckpt, current_var_or_name in sorted(
six.iteritems(assignment_map)):
var = None
# Check if this is Variable object or list of Variable objects (in case of
# partitioned variables).
if _is_variable(current_var_or_name) or (
isinstance(current_var_or_name, list)
and all(_is_variable(v) for v in current_var_or_name)):
var = current_var_or_name
else:
store_vars = vs._get_default_variable_store()._vars # pylint:disable=protected-access
# Check if this variable is in var_store.
var = store_vars.get(current_var_or_name, None)
# Also check if variable is partitioned as list.
if var is None:
var = _collect_partitioned_variable(current_var_or_name, store_vars)
if var is not None:
# If 1 to 1 mapping was provided, find variable in the checkpoint.
if tensor_name_in_ckpt not in variable_map:
raise ValueError("Tensor %s is not found in %s checkpoint %s" % (
tensor_name_in_ckpt, ckpt_dir_or_file, variable_map
))
if _is_variable(var):
# Additional at-call-time checks.
if not var.get_shape().is_compatible_with(
variable_map[tensor_name_in_ckpt]):
raise ValueError(
"Shape of variable %s (%s) doesn't match with shape of "
"tensor %s (%s) from checkpoint reader." % (
var.name, str(var.get_shape()),
tensor_name_in_ckpt, str(variable_map[tensor_name_in_ckpt])
))
var_name = var.name
else:
var_name = ",".join([v.name for v in var])
_set_variable_or_list_initializer(var, ckpt_file, tensor_name_in_ckpt)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, tensor_name_in_ckpt)
else:
scopes = ""
# TODO(vihanjain): Support list of 'current_var_or_name' here.
if "/" in current_var_or_name:
scopes = current_var_or_name[:current_var_or_name.rindex("/")]
if not tensor_name_in_ckpt.endswith("/"):
raise ValueError(
"Assignment map with scope only name {} should map to scope only "
"{}. Should be 'scope/': 'other_scope/'.".format(
scopes, tensor_name_in_ckpt))
# If scope to scope mapping was provided, find all variables in the scope
# and create variable to variable mapping.
scope_variables = set()
for var_name in store_vars:
if not scopes or var_name.startswith(scopes + "/"):
# Consume /part_ if partitioned variable.
if "/part_" in var_name:
var_name = var_name[:var_name.index("/part_")]
scope_variables.add(var_name)
for var_name in sorted(scope_variables):
# Lookup name with specified prefix and suffix from current variable.
# If tensor_name given is '/' (root), don't use it for full name.
full_tensor_name = var_name[len(scopes):]
if current_var_or_name != "/":
full_tensor_name = full_tensor_name[1:]
if tensor_name_in_ckpt != "/":
full_tensor_name = tensor_name_in_ckpt + full_tensor_name
# Remove trailing '/', if any, in the full_tensor_name
if full_tensor_name.endswith("/"):
full_tensor_name = full_tensor_name[:-1]
if full_tensor_name not in variable_map:
raise ValueError(
"Tensor %s (%s in %s) is not found in %s checkpoint" % (
full_tensor_name, var_name[len(scopes) + 1:],
tensor_name_in_ckpt, ckpt_dir_or_file
))
var = store_vars.get(var_name, None)
if var is None:
var = _collect_partitioned_variable(var_name, store_vars)
_set_variable_or_list_initializer(var, ckpt_file, full_tensor_name)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, full_tensor_name)
def _get_checkpoint_filename(ckpt_dir_or_file):
"""Returns checkpoint filename given directory or specific checkpoint file."""
if gfile.IsDirectory(ckpt_dir_or_file):
return checkpoint_management.latest_checkpoint(ckpt_dir_or_file)
return ckpt_dir_or_file
def _set_checkpoint_initializer(variable,
ckpt_file,
tensor_name,
slice_spec,
name="checkpoint_initializer",
# +++ DIT: default write_version=saver_pb2.SaverDef.DIT
write_version=saver_pb2.SaverDef.DIT):
# --- DIT: default write_version=saver_pb2.SaverDef.DIT
"""Overrides given variable's initialization op.
Sets variable initializer to assign op that initializes variable from tensor's
value in the checkpoint.
Args:
variable: `tf.Variable` object.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
slice_spec: Slice specification for loading partitioned tensors.
    name: Name of the operation.
    write_version: `SaverDef` checkpoint format version (`V1`, `V2`, or the
      DIT-patched `DIT`) that selects which restore op reads the checkpoint.
"""
base_type = variable.dtype.base_dtype
# Do not colocate with variable since RestoreV2 op only runs on CPU and
# colocation will force variable (and other ops that colocate with variable)
# to be on CPU as well. It is okay to place the variable's initializer op on
# CPU since it will only be run once at the start.
with ops.device(variable.device), ops.device("/cpu:0"):
#restore_op = io_ops.restore_v2(
# ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
# +++ DIT: check for restore_dit
    if write_version in (saver_pb2.SaverDef.V1, saver_pb2.SaverDef.V2):
      restore_op = io_ops.restore_v2(
          ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
    elif write_version == saver_pb2.SaverDef.DIT:
      restore_op = io_ops.restore_dit(
          ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
    else:
      raise RuntimeError("Unexpected write_version: %s" % write_version)
# --- DIT: check for restore_dit
names_to_saveables = saveable_object_util.op_list_to_dict([variable])
saveable_objects = []
for name, op in names_to_saveables.items():
for s in saveable_object_util.saveable_objects_for_op(op, name):
saveable_objects.append(s)
assert len(saveable_objects) == 1 # Should be only one variable.
init_op = saveable_objects[0].restore([restore_op], restored_shapes=None)
# pylint:disable=protected-access
variable._initializer_op = init_op
restore_op.set_shape(variable.shape)
variable._initial_value = restore_op
# pylint:enable=protected-access
def _set_variable_or_list_initializer(variable_or_list, ckpt_file,
tensor_name):
"""Overrides initialization op of given variable or list of variables.
Calls `_set_checkpoint_initializer` for each variable in the given list of
variables.
Args:
variable_or_list: `tf.Variable` object or a list of `tf.Variable` objects.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
Raises:
ValueError: if all objects in `variable_or_list` are not partitions of the
same large variable.
"""
if isinstance(variable_or_list, (list, tuple)):
# A set of slices.
slice_name = None
for v in variable_or_list:
slice_info = v._save_slice_info # pylint:disable=protected-access
if slice_name is None:
slice_name = slice_info.full_name
elif slice_name != slice_info.full_name:
raise ValueError("Slices must all be from the same tensor: %s != %s" %
(slice_name, slice_info.full_name))
_set_checkpoint_initializer(v, ckpt_file, tensor_name, slice_info.spec)
else:
_set_checkpoint_initializer(variable_or_list, ckpt_file, tensor_name, "")
def _is_variable(x):
return (isinstance(x, variables.Variable) or
resource_variable_ops.is_resource_variable(x))
def _collect_partitioned_variable(name, all_vars):
"""Returns list of `tf.Variable` that comprise the partitioned variable."""
if name + "/part_0" in all_vars:
var = []
i = 0
while name + "/part_%d" % i in all_vars:
var.append(all_vars[name + "/part_%d" % i])
i += 1
return var
return None
|
wait_for_new_checkpoint
|
Waits until a new checkpoint file is found.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
last_checkpoint: The last checkpoint path used or `None` if we're expecting
a checkpoint for the first time.
seconds_to_sleep: The number of seconds to sleep for before looking for a
new checkpoint.
timeout: The maximum number of seconds to wait. If left as `None`, then the
process will wait indefinitely.
Returns:
a new checkpoint path, or None if the timeout was reached.
|
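A polling sketch for the helper documented above, assuming it is called from
within this module (it is not exported via `tf_export`); the directory and
timing values are hypothetical:
```python
last = None
while True:
  path = wait_for_new_checkpoint("/tmp/train_dir", last_checkpoint=last,
                                 seconds_to_sleep=2, timeout=600)
  if path is None:
    break  # timed out: no new checkpoint appeared within 10 minutes
  last = path
  print("evaluating checkpoint:", path)
```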
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to work with checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import six
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import py_checkpoint_reader
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"load_checkpoint", "load_variable", "list_variables",
"checkpoints_iterator", "init_from_checkpoint"
]
@tf_export("train.load_checkpoint")
def load_checkpoint(ckpt_dir_or_file):
"""Returns `CheckpointReader` for checkpoint found in `ckpt_dir_or_file`.
If `ckpt_dir_or_file` resolves to a directory with multiple checkpoints,
reader for the latest checkpoint is returned.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint
file.
Returns:
`CheckpointReader` object.
Raises:
ValueError: If `ckpt_dir_or_file` resolves to a directory with no
checkpoints.
"""
filename = _get_checkpoint_filename(ckpt_dir_or_file)
if filename is None:
raise ValueError("Couldn't find 'checkpoint' file or checkpoints in "
"given directory %s" % ckpt_dir_or_file)
return py_checkpoint_reader.NewCheckpointReader(filename)
@tf_export("train.load_variable")
def load_variable(ckpt_dir_or_file, name):
"""Returns the tensor value of the given variable in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
name: Name of the variable to return.
Returns:
A numpy `ndarray` with a copy of the value of this variable.
"""
# TODO(b/29227106): Fix this in the right place and remove this.
if name.endswith(":0"):
name = name[:-2]
reader = load_checkpoint(ckpt_dir_or_file)
return reader.get_tensor(name)
@tf_export("train.list_variables")
def list_variables(ckpt_dir_or_file):
"""Returns list of all variables in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
Returns:
List of tuples `(name, shape)`.
"""
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
names = sorted(variable_map.keys())
result = []
for name in names:
result.append((name, variable_map[name]))
return result
# MASKED: wait_for_new_checkpoint function (lines 107-135)
@tf_export("train.checkpoints_iterator")
def checkpoints_iterator(checkpoint_dir,
min_interval_secs=0,
timeout=None,
timeout_fn=None):
"""Continuously yield new checkpoint files as they appear.
The iterator only checks for new checkpoints when control flow has been
reverted to it. This means it can miss checkpoints if your code takes longer
to run between iterations than `min_interval_secs` or the interval at which
new checkpoints are written.
The `timeout` argument is the maximum number of seconds to block waiting for
a new checkpoint. It is used in combination with the `timeout_fn` as
follows:
* If the timeout expires and no `timeout_fn` was specified, the iterator
stops yielding.
* If a `timeout_fn` was specified, that function is called and if it returns
a true boolean value the iterator stops yielding.
* If the function returns a false boolean value then the iterator resumes the
wait for new checkpoints. At this point the timeout logic applies again.
This behavior gives control to callers on what to do if checkpoints do not
come fast enough or stop being generated. For example, if callers have a way
to detect that the training has stopped and know that no new checkpoints
will be generated, they can provide a `timeout_fn` that returns `True` when
the training has stopped. If they know that the training is still going on
they return `False` instead.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
min_interval_secs: The minimum number of seconds between yielding
checkpoints.
timeout: The maximum number of seconds to wait between checkpoints. If left
as `None`, then the process will wait indefinitely.
timeout_fn: Optional function to call after a timeout. If the function
returns True, then it means that no new checkpoints will be generated and
the iterator will exit. The function is called with no arguments.
Yields:
String paths to latest checkpoint files as they arrive.
"""
checkpoint_path = None
while True:
new_checkpoint_path = wait_for_new_checkpoint(
checkpoint_dir, checkpoint_path, timeout=timeout)
if new_checkpoint_path is None:
if not timeout_fn:
# timed out
logging.info("Timed-out waiting for a checkpoint.")
return
if timeout_fn():
# The timeout_fn indicated that we are truly done.
return
else:
# The timeout_fn indicated that more checkpoints may come.
continue
start = time.time()
checkpoint_path = new_checkpoint_path
yield checkpoint_path
time_to_next_eval = start + min_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
@tf_export(v1=["train.init_from_checkpoint"])
def init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""Replaces `tf.Variable` initializers so they load from a checkpoint file.
Values are not loaded immediately, but when the initializer is run
(typically by running a `tf.compat.v1.global_variables_initializer` op).
Note: This overrides default initialization ops of specified variables and
redefines dtype.
Assignment map supports following syntax:
* `'checkpoint_scope_name/': 'scope_name/'` - will load all variables in
current `scope_name` from `checkpoint_scope_name` with matching tensor
names.
* `'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` -
will initialize `scope_name/variable_name` variable
from `checkpoint_scope_name/some_other_variable`.
* `'scope_variable_name': variable` - will initialize given `tf.Variable`
object with tensor 'scope_variable_name' from the checkpoint.
* `'scope_variable_name': list(variable)` - will initialize list of
partitioned variables with tensor 'scope_variable_name' from the checkpoint.
* `'/': 'scope_name/'` - will load all variables in current `scope_name` from
checkpoint's root (e.g. no scope).
Supports loading into partitioned variables, which are represented as
`'<variable>/part_<part #>'`.
Example:
```python
# Say, '/tmp/model.ckpt' has the following tensors:
# -- name='old_scope_1/var1', shape=[20, 2]
# -- name='old_scope_1/var2', shape=[50, 4]
# -- name='old_scope_2/var3', shape=[100, 100]
# Create new model's variables
with tf.compat.v1.variable_scope('new_scope_1'):
var1 = tf.compat.v1.get_variable('var1', shape=[20, 2],
initializer=tf.compat.v1.zeros_initializer())
with tf.compat.v1.variable_scope('new_scope_2'):
var2 = tf.compat.v1.get_variable('var2', shape=[50, 4],
initializer=tf.compat.v1.zeros_initializer())
# Partition into 5 variables along the first axis.
var3 = tf.compat.v1.get_variable(name='var3', shape=[100, 100],
initializer=tf.compat.v1.zeros_initializer(),
partitioner=lambda shape, dtype: [5, 1])
# Initialize all variables in `new_scope_1` from `old_scope_1`.
init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/': 'new_scope_1'})
# Use names to specify which variables to initialize from checkpoint.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': 'new_scope_1/var1',
'old_scope_1/var2': 'new_scope_2/var2'})
# Or use tf.Variable objects to identify what to initialize.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': var1,
'old_scope_1/var2': var2})
# Initialize partitioned variables using variable's name
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': 'new_scope_2/var3'})
# Or specify the list of tf.Variable objects.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': var3._get_variable_list()})
```
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
assignment_map: Dict, where keys are names of the variables in the
checkpoint and values are current variables or names of current variables
(in default graph).
Raises:
ValueError: If missing variables in current graph, or if missing
checkpoints or tensors in checkpoints.
"""
init_from_checkpoint_fn = lambda _: _init_from_checkpoint(
ckpt_dir_or_file, assignment_map)
if distribution_strategy_context.get_cross_replica_context():
init_from_checkpoint_fn(None)
else:
distribution_strategy_context.get_replica_context().merge_call(
init_from_checkpoint_fn)
def _init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""See `init_from_checkpoint` for documentation."""
ckpt_file = _get_checkpoint_filename(ckpt_dir_or_file)
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
for tensor_name_in_ckpt, current_var_or_name in sorted(
six.iteritems(assignment_map)):
var = None
# Check if this is Variable object or list of Variable objects (in case of
# partitioned variables).
if _is_variable(current_var_or_name) or (
isinstance(current_var_or_name, list)
and all(_is_variable(v) for v in current_var_or_name)):
var = current_var_or_name
else:
store_vars = vs._get_default_variable_store()._vars # pylint:disable=protected-access
# Check if this variable is in var_store.
var = store_vars.get(current_var_or_name, None)
# Also check if variable is partitioned as list.
if var is None:
var = _collect_partitioned_variable(current_var_or_name, store_vars)
if var is not None:
# If 1 to 1 mapping was provided, find variable in the checkpoint.
if tensor_name_in_ckpt not in variable_map:
raise ValueError("Tensor %s is not found in %s checkpoint %s" % (
tensor_name_in_ckpt, ckpt_dir_or_file, variable_map
))
if _is_variable(var):
# Additional at-call-time checks.
if not var.get_shape().is_compatible_with(
variable_map[tensor_name_in_ckpt]):
raise ValueError(
"Shape of variable %s (%s) doesn't match with shape of "
"tensor %s (%s) from checkpoint reader." % (
var.name, str(var.get_shape()),
tensor_name_in_ckpt, str(variable_map[tensor_name_in_ckpt])
))
var_name = var.name
else:
var_name = ",".join([v.name for v in var])
_set_variable_or_list_initializer(var, ckpt_file, tensor_name_in_ckpt)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, tensor_name_in_ckpt)
else:
scopes = ""
# TODO(vihanjain): Support list of 'current_var_or_name' here.
if "/" in current_var_or_name:
scopes = current_var_or_name[:current_var_or_name.rindex("/")]
if not tensor_name_in_ckpt.endswith("/"):
raise ValueError(
"Assignment map with scope only name {} should map to scope only "
"{}. Should be 'scope/': 'other_scope/'.".format(
scopes, tensor_name_in_ckpt))
# If scope to scope mapping was provided, find all variables in the scope
# and create variable to variable mapping.
scope_variables = set()
for var_name in store_vars:
if not scopes or var_name.startswith(scopes + "/"):
# Consume /part_ if partitioned variable.
if "/part_" in var_name:
var_name = var_name[:var_name.index("/part_")]
scope_variables.add(var_name)
for var_name in sorted(scope_variables):
# Lookup name with specified prefix and suffix from current variable.
# If tensor_name given is '/' (root), don't use it for full name.
full_tensor_name = var_name[len(scopes):]
if current_var_or_name != "/":
full_tensor_name = full_tensor_name[1:]
if tensor_name_in_ckpt != "/":
full_tensor_name = tensor_name_in_ckpt + full_tensor_name
# Remove trailing '/', if any, in the full_tensor_name
if full_tensor_name.endswith("/"):
full_tensor_name = full_tensor_name[:-1]
if full_tensor_name not in variable_map:
raise ValueError(
"Tensor %s (%s in %s) is not found in %s checkpoint" % (
full_tensor_name, var_name[len(scopes) + 1:],
tensor_name_in_ckpt, ckpt_dir_or_file
))
var = store_vars.get(var_name, None)
if var is None:
var = _collect_partitioned_variable(var_name, store_vars)
_set_variable_or_list_initializer(var, ckpt_file, full_tensor_name)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, full_tensor_name)
def _get_checkpoint_filename(ckpt_dir_or_file):
"""Returns checkpoint filename given directory or specific checkpoint file."""
if gfile.IsDirectory(ckpt_dir_or_file):
return checkpoint_management.latest_checkpoint(ckpt_dir_or_file)
return ckpt_dir_or_file
def _set_checkpoint_initializer(variable,
ckpt_file,
tensor_name,
slice_spec,
name="checkpoint_initializer",
# +++ DIT: default write_version=saver_pb2.SaverDef.DIT
write_version=saver_pb2.SaverDef.DIT):
# --- DIT: default write_version=saver_pb2.SaverDef.DIT
"""Overrides given variable's initialization op.
Sets variable initializer to assign op that initializes variable from tensor's
value in the checkpoint.
Args:
variable: `tf.Variable` object.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
slice_spec: Slice specification for loading partitioned tensors.
    name: Name of the operation.
    write_version: `SaverDef` checkpoint format version (`V1`, `V2`, or the
      DIT-patched `DIT`) that selects which restore op reads the checkpoint.
"""
base_type = variable.dtype.base_dtype
# Do not colocate with variable since RestoreV2 op only runs on CPU and
# colocation will force variable (and other ops that colocate with variable)
# to be on CPU as well. It is okay to place the variable's initializer op on
# CPU since it will only be run once at the start.
with ops.device(variable.device), ops.device("/cpu:0"):
#restore_op = io_ops.restore_v2(
# ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
# +++ DIT: check for restore_dit
    if write_version in (saver_pb2.SaverDef.V1, saver_pb2.SaverDef.V2):
      restore_op = io_ops.restore_v2(
          ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
    elif write_version == saver_pb2.SaverDef.DIT:
      restore_op = io_ops.restore_dit(
          ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
    else:
      raise RuntimeError("Unexpected write_version: %s" % write_version)
# --- DIT: check for restore_dit
names_to_saveables = saveable_object_util.op_list_to_dict([variable])
saveable_objects = []
for name, op in names_to_saveables.items():
for s in saveable_object_util.saveable_objects_for_op(op, name):
saveable_objects.append(s)
assert len(saveable_objects) == 1 # Should be only one variable.
init_op = saveable_objects[0].restore([restore_op], restored_shapes=None)
# pylint:disable=protected-access
variable._initializer_op = init_op
restore_op.set_shape(variable.shape)
variable._initial_value = restore_op
# pylint:enable=protected-access
def _set_variable_or_list_initializer(variable_or_list, ckpt_file,
tensor_name):
"""Overrides initialization op of given variable or list of variables.
Calls `_set_checkpoint_initializer` for each variable in the given list of
variables.
Args:
variable_or_list: `tf.Variable` object or a list of `tf.Variable` objects.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
Raises:
ValueError: if all objects in `variable_or_list` are not partitions of the
same large variable.
"""
if isinstance(variable_or_list, (list, tuple)):
# A set of slices.
slice_name = None
for v in variable_or_list:
slice_info = v._save_slice_info # pylint:disable=protected-access
if slice_name is None:
slice_name = slice_info.full_name
elif slice_name != slice_info.full_name:
raise ValueError("Slices must all be from the same tensor: %s != %s" %
(slice_name, slice_info.full_name))
_set_checkpoint_initializer(v, ckpt_file, tensor_name, slice_info.spec)
else:
_set_checkpoint_initializer(variable_or_list, ckpt_file, tensor_name, "")
def _is_variable(x):
return (isinstance(x, variables.Variable) or
resource_variable_ops.is_resource_variable(x))
def _collect_partitioned_variable(name, all_vars):
"""Returns list of `tf.Variable` that comprise the partitioned variable."""
if name + "/part_0" in all_vars:
var = []
i = 0
while name + "/part_%d" % i in all_vars:
var.append(all_vars[name + "/part_%d" % i])
i += 1
return var
return None
|
def wait_for_new_checkpoint(checkpoint_dir,
last_checkpoint=None,
seconds_to_sleep=1,
timeout=None):
"""Waits until a new checkpoint file is found.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
last_checkpoint: The last checkpoint path used or `None` if we're expecting
a checkpoint for the first time.
seconds_to_sleep: The number of seconds to sleep for before looking for a
new checkpoint.
timeout: The maximum number of seconds to wait. If left as `None`, then the
process will wait indefinitely.
Returns:
a new checkpoint path, or None if the timeout was reached.
"""
logging.info("Waiting for new checkpoint at %s", checkpoint_dir)
stop_time = time.time() + timeout if timeout is not None else None
while True:
checkpoint_path = checkpoint_management.latest_checkpoint(checkpoint_dir)
if checkpoint_path is None or checkpoint_path == last_checkpoint:
if stop_time is not None and time.time() + seconds_to_sleep > stop_time:
return None
time.sleep(seconds_to_sleep)
else:
logging.info("Found new checkpoint at %s", checkpoint_path)
return checkpoint_path
| 107
| 135
|
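For reference, a small sketch of the `'<variable>/part_<part #>'` naming
convention that `_collect_partitioned_variable` walks; the variable store is
mocked as a plain dict, so the values are stand-ins rather than real
`tf.Variable` objects:
```python
all_vars = {
    "scope/var3/part_0": "v0",
    "scope/var3/part_1": "v1",
    "scope/var3/part_2": "v2",
}
# Parts are gathered in index order until the first missing part number.
assert _collect_partitioned_variable("scope/var3", all_vars) == ["v0", "v1", "v2"]
# A name without a part_0 entry is not treated as partitioned.
assert _collect_partitioned_variable("scope/other", all_vars) is None
```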
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to work with checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import six
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import py_checkpoint_reader
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"load_checkpoint", "load_variable", "list_variables",
"checkpoints_iterator", "init_from_checkpoint"
]
@tf_export("train.load_checkpoint")
def load_checkpoint(ckpt_dir_or_file):
"""Returns `CheckpointReader` for checkpoint found in `ckpt_dir_or_file`.
If `ckpt_dir_or_file` resolves to a directory with multiple checkpoints,
reader for the latest checkpoint is returned.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint
file.
Returns:
`CheckpointReader` object.
Raises:
ValueError: If `ckpt_dir_or_file` resolves to a directory with no
checkpoints.
"""
filename = _get_checkpoint_filename(ckpt_dir_or_file)
if filename is None:
raise ValueError("Couldn't find 'checkpoint' file or checkpoints in "
"given directory %s" % ckpt_dir_or_file)
return py_checkpoint_reader.NewCheckpointReader(filename)
@tf_export("train.load_variable")
def load_variable(ckpt_dir_or_file, name):
"""Returns the tensor value of the given variable in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
name: Name of the variable to return.
Returns:
A numpy `ndarray` with a copy of the value of this variable.
"""
# TODO(b/29227106): Fix this in the right place and remove this.
if name.endswith(":0"):
name = name[:-2]
reader = load_checkpoint(ckpt_dir_or_file)
return reader.get_tensor(name)
@tf_export("train.list_variables")
def list_variables(ckpt_dir_or_file):
"""Returns list of all variables in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
Returns:
List of tuples `(name, shape)`.
"""
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
names = sorted(variable_map.keys())
result = []
for name in names:
result.append((name, variable_map[name]))
return result
def wait_for_new_checkpoint(checkpoint_dir,
last_checkpoint=None,
seconds_to_sleep=1,
timeout=None):
"""Waits until a new checkpoint file is found.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
last_checkpoint: The last checkpoint path used or `None` if we're expecting
a checkpoint for the first time.
seconds_to_sleep: The number of seconds to sleep for before looking for a
new checkpoint.
timeout: The maximum number of seconds to wait. If left as `None`, then the
process will wait indefinitely.
Returns:
a new checkpoint path, or None if the timeout was reached.
"""
logging.info("Waiting for new checkpoint at %s", checkpoint_dir)
stop_time = time.time() + timeout if timeout is not None else None
while True:
checkpoint_path = checkpoint_management.latest_checkpoint(checkpoint_dir)
if checkpoint_path is None or checkpoint_path == last_checkpoint:
if stop_time is not None and time.time() + seconds_to_sleep > stop_time:
return None
time.sleep(seconds_to_sleep)
else:
logging.info("Found new checkpoint at %s", checkpoint_path)
return checkpoint_path
@tf_export("train.checkpoints_iterator")
def checkpoints_iterator(checkpoint_dir,
min_interval_secs=0,
timeout=None,
timeout_fn=None):
"""Continuously yield new checkpoint files as they appear.
The iterator only checks for new checkpoints when control flow has been
reverted to it. This means it can miss checkpoints if your code takes longer
to run between iterations than `min_interval_secs` or the interval at which
new checkpoints are written.
The `timeout` argument is the maximum number of seconds to block waiting for
a new checkpoint. It is used in combination with the `timeout_fn` as
follows:
* If the timeout expires and no `timeout_fn` was specified, the iterator
stops yielding.
* If a `timeout_fn` was specified, that function is called and if it returns
a true boolean value the iterator stops yielding.
* If the function returns a false boolean value then the iterator resumes the
wait for new checkpoints. At this point the timeout logic applies again.
This behavior gives control to callers on what to do if checkpoints do not
come fast enough or stop being generated. For example, if callers have a way
to detect that the training has stopped and know that no new checkpoints
will be generated, they can provide a `timeout_fn` that returns `True` when
the training has stopped. If they know that the training is still going on
they return `False` instead.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
min_interval_secs: The minimum number of seconds between yielding
checkpoints.
timeout: The maximum number of seconds to wait between checkpoints. If left
as `None`, then the process will wait indefinitely.
timeout_fn: Optional function to call after a timeout. If the function
returns True, then it means that no new checkpoints will be generated and
the iterator will exit. The function is called with no arguments.
Yields:
String paths to latest checkpoint files as they arrive.
"""
checkpoint_path = None
while True:
new_checkpoint_path = wait_for_new_checkpoint(
checkpoint_dir, checkpoint_path, timeout=timeout)
if new_checkpoint_path is None:
if not timeout_fn:
# timed out
logging.info("Timed-out waiting for a checkpoint.")
return
if timeout_fn():
# The timeout_fn indicated that we are truly done.
return
else:
# The timeout_fn indicated that more checkpoints may come.
continue
start = time.time()
checkpoint_path = new_checkpoint_path
yield checkpoint_path
time_to_next_eval = start + min_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
@tf_export(v1=["train.init_from_checkpoint"])
def init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""Replaces `tf.Variable` initializers so they load from a checkpoint file.
Values are not loaded immediately, but when the initializer is run
(typically by running a `tf.compat.v1.global_variables_initializer` op).
Note: This overrides default initialization ops of specified variables and
redefines dtype.
Assignment map supports following syntax:
* `'checkpoint_scope_name/': 'scope_name/'` - will load all variables in
current `scope_name` from `checkpoint_scope_name` with matching tensor
names.
* `'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` -
will initialize `scope_name/variable_name` variable
from `checkpoint_scope_name/some_other_variable`.
* `'scope_variable_name': variable` - will initialize given `tf.Variable`
object with tensor 'scope_variable_name' from the checkpoint.
* `'scope_variable_name': list(variable)` - will initialize list of
partitioned variables with tensor 'scope_variable_name' from the checkpoint.
* `'/': 'scope_name/'` - will load all variables in current `scope_name` from
checkpoint's root (e.g. no scope).
Supports loading into partitioned variables, which are represented as
`'<variable>/part_<part #>'`.
Example:
```python
# Say, '/tmp/model.ckpt' has the following tensors:
# -- name='old_scope_1/var1', shape=[20, 2]
# -- name='old_scope_1/var2', shape=[50, 4]
# -- name='old_scope_2/var3', shape=[100, 100]
# Create new model's variables
with tf.compat.v1.variable_scope('new_scope_1'):
var1 = tf.compat.v1.get_variable('var1', shape=[20, 2],
initializer=tf.compat.v1.zeros_initializer())
with tf.compat.v1.variable_scope('new_scope_2'):
var2 = tf.compat.v1.get_variable('var2', shape=[50, 4],
initializer=tf.compat.v1.zeros_initializer())
# Partition into 5 variables along the first axis.
var3 = tf.compat.v1.get_variable(name='var3', shape=[100, 100],
initializer=tf.compat.v1.zeros_initializer(),
partitioner=lambda shape, dtype: [5, 1])
# Initialize all variables in `new_scope_1` from `old_scope_1`.
init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/': 'new_scope_1'})
# Use names to specify which variables to initialize from checkpoint.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': 'new_scope_1/var1',
'old_scope_1/var2': 'new_scope_2/var2'})
# Or use tf.Variable objects to identify what to initialize.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': var1,
'old_scope_1/var2': var2})
# Initialize partitioned variables using variable's name
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': 'new_scope_2/var3'})
# Or specify the list of tf.Variable objects.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': var3._get_variable_list()})
```
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
assignment_map: Dict, where keys are names of the variables in the
checkpoint and values are current variables or names of current variables
(in default graph).
Raises:
ValueError: If missing variables in current graph, or if missing
checkpoints or tensors in checkpoints.
"""
init_from_checkpoint_fn = lambda _: _init_from_checkpoint(
ckpt_dir_or_file, assignment_map)
if distribution_strategy_context.get_cross_replica_context():
init_from_checkpoint_fn(None)
else:
distribution_strategy_context.get_replica_context().merge_call(
init_from_checkpoint_fn)
def _init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""See `init_from_checkpoint` for documentation."""
ckpt_file = _get_checkpoint_filename(ckpt_dir_or_file)
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
for tensor_name_in_ckpt, current_var_or_name in sorted(
six.iteritems(assignment_map)):
var = None
# Check if this is Variable object or list of Variable objects (in case of
# partitioned variables).
if _is_variable(current_var_or_name) or (
isinstance(current_var_or_name, list)
and all(_is_variable(v) for v in current_var_or_name)):
var = current_var_or_name
else:
store_vars = vs._get_default_variable_store()._vars # pylint:disable=protected-access
# Check if this variable is in var_store.
var = store_vars.get(current_var_or_name, None)
# Also check if variable is partitioned as list.
if var is None:
var = _collect_partitioned_variable(current_var_or_name, store_vars)
if var is not None:
# If 1 to 1 mapping was provided, find variable in the checkpoint.
if tensor_name_in_ckpt not in variable_map:
raise ValueError("Tensor %s is not found in %s checkpoint %s" % (
tensor_name_in_ckpt, ckpt_dir_or_file, variable_map
))
if _is_variable(var):
# Additional at-call-time checks.
if not var.get_shape().is_compatible_with(
variable_map[tensor_name_in_ckpt]):
raise ValueError(
"Shape of variable %s (%s) doesn't match with shape of "
"tensor %s (%s) from checkpoint reader." % (
var.name, str(var.get_shape()),
tensor_name_in_ckpt, str(variable_map[tensor_name_in_ckpt])
))
var_name = var.name
else:
var_name = ",".join([v.name for v in var])
_set_variable_or_list_initializer(var, ckpt_file, tensor_name_in_ckpt)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, tensor_name_in_ckpt)
else:
scopes = ""
# TODO(vihanjain): Support list of 'current_var_or_name' here.
if "/" in current_var_or_name:
scopes = current_var_or_name[:current_var_or_name.rindex("/")]
if not tensor_name_in_ckpt.endswith("/"):
raise ValueError(
"Assignment map with scope only name {} should map to scope only "
"{}. Should be 'scope/': 'other_scope/'.".format(
scopes, tensor_name_in_ckpt))
# If scope to scope mapping was provided, find all variables in the scope
# and create variable to variable mapping.
scope_variables = set()
for var_name in store_vars:
if not scopes or var_name.startswith(scopes + "/"):
# Consume /part_ if partitioned variable.
if "/part_" in var_name:
var_name = var_name[:var_name.index("/part_")]
scope_variables.add(var_name)
for var_name in sorted(scope_variables):
# Lookup name with specified prefix and suffix from current variable.
# If tensor_name given is '/' (root), don't use it for full name.
full_tensor_name = var_name[len(scopes):]
if current_var_or_name != "/":
full_tensor_name = full_tensor_name[1:]
if tensor_name_in_ckpt != "/":
full_tensor_name = tensor_name_in_ckpt + full_tensor_name
# Remove trailing '/', if any, in the full_tensor_name
if full_tensor_name.endswith("/"):
full_tensor_name = full_tensor_name[:-1]
if full_tensor_name not in variable_map:
raise ValueError(
"Tensor %s (%s in %s) is not found in %s checkpoint" % (
full_tensor_name, var_name[len(scopes) + 1:],
tensor_name_in_ckpt, ckpt_dir_or_file
))
var = store_vars.get(var_name, None)
if var is None:
var = _collect_partitioned_variable(var_name, store_vars)
_set_variable_or_list_initializer(var, ckpt_file, full_tensor_name)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, full_tensor_name)
def _get_checkpoint_filename(ckpt_dir_or_file):
"""Returns checkpoint filename given directory or specific checkpoint file."""
if gfile.IsDirectory(ckpt_dir_or_file):
return checkpoint_management.latest_checkpoint(ckpt_dir_or_file)
return ckpt_dir_or_file
def _set_checkpoint_initializer(variable,
ckpt_file,
tensor_name,
slice_spec,
name="checkpoint_initializer",
# +++ DIT: default write_version=saver_pb2.SaverDef.DIT
write_version=saver_pb2.SaverDef.DIT):
# --- DIT: default write_version=saver_pb2.SaverDef.DIT
"""Overrides given variable's initialization op.
Sets variable initializer to assign op that initializes variable from tensor's
value in the checkpoint.
Args:
variable: `tf.Variable` object.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
slice_spec: Slice specification for loading partitioned tensors.
    name: Name of the operation.
    write_version: `SaverDef` checkpoint format version (`V1`, `V2`, or the
      DIT-patched `DIT`) that selects which restore op reads the checkpoint.
"""
base_type = variable.dtype.base_dtype
# Do not colocate with variable since RestoreV2 op only runs on CPU and
# colocation will force variable (and other ops that colocate with variable)
# to be on CPU as well. It is okay to place the variable's initializer op on
# CPU since it will only be run once at the start.
with ops.device(variable.device), ops.device("/cpu:0"):
#restore_op = io_ops.restore_v2(
# ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
# +++ DIT: check for restore_dit
    if write_version in (saver_pb2.SaverDef.V1, saver_pb2.SaverDef.V2):
      restore_op = io_ops.restore_v2(
          ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
    elif write_version == saver_pb2.SaverDef.DIT:
      restore_op = io_ops.restore_dit(
          ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
    else:
      raise RuntimeError("Unexpected write_version: %s" % write_version)
# --- DIT: check for restore_dit
names_to_saveables = saveable_object_util.op_list_to_dict([variable])
saveable_objects = []
for name, op in names_to_saveables.items():
for s in saveable_object_util.saveable_objects_for_op(op, name):
saveable_objects.append(s)
assert len(saveable_objects) == 1 # Should be only one variable.
init_op = saveable_objects[0].restore([restore_op], restored_shapes=None)
# pylint:disable=protected-access
variable._initializer_op = init_op
restore_op.set_shape(variable.shape)
variable._initial_value = restore_op
# pylint:enable=protected-access
def _set_variable_or_list_initializer(variable_or_list, ckpt_file,
tensor_name):
"""Overrides initialization op of given variable or list of variables.
Calls `_set_checkpoint_initializer` for each variable in the given list of
variables.
Args:
variable_or_list: `tf.Variable` object or a list of `tf.Variable` objects.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
Raises:
ValueError: if all objects in `variable_or_list` are not partitions of the
same large variable.
"""
if isinstance(variable_or_list, (list, tuple)):
# A set of slices.
slice_name = None
for v in variable_or_list:
slice_info = v._save_slice_info # pylint:disable=protected-access
if slice_name is None:
slice_name = slice_info.full_name
elif slice_name != slice_info.full_name:
raise ValueError("Slices must all be from the same tensor: %s != %s" %
(slice_name, slice_info.full_name))
_set_checkpoint_initializer(v, ckpt_file, tensor_name, slice_info.spec)
else:
_set_checkpoint_initializer(variable_or_list, ckpt_file, tensor_name, "")
def _is_variable(x):
return (isinstance(x, variables.Variable) or
resource_variable_ops.is_resource_variable(x))
def _collect_partitioned_variable(name, all_vars):
"""Returns list of `tf.Variable` that comprise the partitioned variable."""
if name + "/part_0" in all_vars:
var = []
i = 0
while name + "/part_%d" % i in all_vars:
var.append(all_vars[name + "/part_%d" % i])
i += 1
return var
return None
|
checkpoints_iterator
|
Continuously yield new checkpoint files as they appear.
The iterator only checks for new checkpoints when control flow has been
reverted to it. This means it can miss checkpoints if your code takes longer
to run between iterations than `min_interval_secs` or the interval at which
new checkpoints are written.
The `timeout` argument is the maximum number of seconds to block waiting for
a new checkpoint. It is used in combination with the `timeout_fn` as
follows:
* If the timeout expires and no `timeout_fn` was specified, the iterator
stops yielding.
* If a `timeout_fn` was specified, that function is called and if it returns
a true boolean value the iterator stops yielding.
* If the function returns a false boolean value then the iterator resumes the
wait for new checkpoints. At this point the timeout logic applies again.
This behavior gives control to callers on what to do if checkpoints do not
come fast enough or stop being generated. For example, if callers have a way
to detect that the training has stopped and know that no new checkpoints
will be generated, they can provide a `timeout_fn` that returns `True` when
the training has stopped. If they know that the training is still going on
they return `False` instead.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
min_interval_secs: The minimum number of seconds between yielding
checkpoints.
timeout: The maximum number of seconds to wait between checkpoints. If left
as `None`, then the process will wait indefinitely.
timeout_fn: Optional function to call after a timeout. If the function
returns True, then it means that no new checkpoints will be generated and
the iterator will exit. The function is called with no arguments.
Yields:
String paths to latest checkpoint files as they arrive.
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to work with checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import six
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import py_checkpoint_reader
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"load_checkpoint", "load_variable", "list_variables",
"checkpoints_iterator", "init_from_checkpoint"
]
@tf_export("train.load_checkpoint")
def load_checkpoint(ckpt_dir_or_file):
"""Returns `CheckpointReader` for checkpoint found in `ckpt_dir_or_file`.
If `ckpt_dir_or_file` resolves to a directory with multiple checkpoints,
  a reader for the latest checkpoint is returned.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint
file.
Returns:
`CheckpointReader` object.
Raises:
ValueError: If `ckpt_dir_or_file` resolves to a directory with no
checkpoints.
"""
filename = _get_checkpoint_filename(ckpt_dir_or_file)
if filename is None:
raise ValueError("Couldn't find 'checkpoint' file or checkpoints in "
"given directory %s" % ckpt_dir_or_file)
return py_checkpoint_reader.NewCheckpointReader(filename)
@tf_export("train.load_variable")
def load_variable(ckpt_dir_or_file, name):
"""Returns the tensor value of the given variable in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
name: Name of the variable to return.
Returns:
A numpy `ndarray` with a copy of the value of this variable.
"""
# TODO(b/29227106): Fix this in the right place and remove this.
if name.endswith(":0"):
name = name[:-2]
reader = load_checkpoint(ckpt_dir_or_file)
return reader.get_tensor(name)
@tf_export("train.list_variables")
def list_variables(ckpt_dir_or_file):
"""Returns list of all variables in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
Returns:
List of tuples `(name, shape)`.
"""
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
names = sorted(variable_map.keys())
result = []
for name in names:
result.append((name, variable_map[name]))
return result
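# Inspection sketch (hypothetical paths; assumes a checkpoint already exists
# under /tmp/model_dir). `list_variables` pairs each name with its shape and
# `load_variable` materializes one tensor as a numpy array:
#
#   for name, shape in list_variables('/tmp/model_dir'):
#     print(name, shape)
#   kernel = load_variable('/tmp/model_dir', 'dense/kernel')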
def wait_for_new_checkpoint(checkpoint_dir,
last_checkpoint=None,
seconds_to_sleep=1,
timeout=None):
"""Waits until a new checkpoint file is found.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
last_checkpoint: The last checkpoint path used or `None` if we're expecting
a checkpoint for the first time.
seconds_to_sleep: The number of seconds to sleep for before looking for a
new checkpoint.
timeout: The maximum number of seconds to wait. If left as `None`, then the
process will wait indefinitely.
Returns:
a new checkpoint path, or None if the timeout was reached.
"""
logging.info("Waiting for new checkpoint at %s", checkpoint_dir)
stop_time = time.time() + timeout if timeout is not None else None
while True:
checkpoint_path = checkpoint_management.latest_checkpoint(checkpoint_dir)
if checkpoint_path is None or checkpoint_path == last_checkpoint:
if stop_time is not None and time.time() + seconds_to_sleep > stop_time:
return None
time.sleep(seconds_to_sleep)
else:
logging.info("Found new checkpoint at %s", checkpoint_path)
return checkpoint_path
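# Usage sketch (hypothetical directory): block for up to five minutes for a
# checkpoint newer than `last`, then handle the timeout explicitly:
#
#   path = wait_for_new_checkpoint('/tmp/train_dir', last, timeout=300)
#   if path is None:
#     ...  # timed out; caller decides whether to keep polling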
# MASKED: checkpoints_iterator function (lines 138-201)
@tf_export(v1=["train.init_from_checkpoint"])
def init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""Replaces `tf.Variable` initializers so they load from a checkpoint file.
Values are not loaded immediately, but when the initializer is run
(typically by running a `tf.compat.v1.global_variables_initializer` op).
Note: This overrides default initialization ops of specified variables and
redefines dtype.
  The assignment map supports the following syntax:
* `'checkpoint_scope_name/': 'scope_name/'` - will load all variables in
current `scope_name` from `checkpoint_scope_name` with matching tensor
names.
* `'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` -
will initialize `scope_name/variable_name` variable
from `checkpoint_scope_name/some_other_variable`.
* `'scope_variable_name': variable` - will initialize given `tf.Variable`
object with tensor 'scope_variable_name' from the checkpoint.
* `'scope_variable_name': list(variable)` - will initialize list of
partitioned variables with tensor 'scope_variable_name' from the checkpoint.
* `'/': 'scope_name/'` - will load all variables in current `scope_name` from
checkpoint's root (e.g. no scope).
Supports loading into partitioned variables, which are represented as
`'<variable>/part_<part #>'`.
Example:
```python
# Say, '/tmp/model.ckpt' has the following tensors:
# -- name='old_scope_1/var1', shape=[20, 2]
# -- name='old_scope_1/var2', shape=[50, 4]
# -- name='old_scope_2/var3', shape=[100, 100]
# Create new model's variables
with tf.compat.v1.variable_scope('new_scope_1'):
var1 = tf.compat.v1.get_variable('var1', shape=[20, 2],
initializer=tf.compat.v1.zeros_initializer())
with tf.compat.v1.variable_scope('new_scope_2'):
var2 = tf.compat.v1.get_variable('var2', shape=[50, 4],
initializer=tf.compat.v1.zeros_initializer())
# Partition into 5 variables along the first axis.
var3 = tf.compat.v1.get_variable(name='var3', shape=[100, 100],
initializer=tf.compat.v1.zeros_initializer(),
partitioner=lambda shape, dtype: [5, 1])
# Initialize all variables in `new_scope_1` from `old_scope_1`.
init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/': 'new_scope_1'})
# Use names to specify which variables to initialize from checkpoint.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': 'new_scope_1/var1',
'old_scope_1/var2': 'new_scope_2/var2'})
# Or use tf.Variable objects to identify what to initialize.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': var1,
'old_scope_1/var2': var2})
# Initialize partitioned variables using variable's name
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': 'new_scope_2/var3'})
# Or specify the list of tf.Variable objects.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': var3._get_variable_list()})
```
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
assignment_map: Dict, where keys are names of the variables in the
checkpoint and values are current variables or names of current variables
(in default graph).
Raises:
    ValueError: If a variable is missing from the current graph, or if the
      checkpoint or a tensor in it cannot be found.
"""
init_from_checkpoint_fn = lambda _: _init_from_checkpoint(
ckpt_dir_or_file, assignment_map)
if distribution_strategy_context.get_cross_replica_context():
init_from_checkpoint_fn(None)
else:
distribution_strategy_context.get_replica_context().merge_call(
init_from_checkpoint_fn)
def _init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""See `init_from_checkpoint` for documentation."""
ckpt_file = _get_checkpoint_filename(ckpt_dir_or_file)
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
for tensor_name_in_ckpt, current_var_or_name in sorted(
six.iteritems(assignment_map)):
var = None
# Check if this is Variable object or list of Variable objects (in case of
# partitioned variables).
if _is_variable(current_var_or_name) or (
isinstance(current_var_or_name, list)
and all(_is_variable(v) for v in current_var_or_name)):
var = current_var_or_name
else:
store_vars = vs._get_default_variable_store()._vars # pylint:disable=protected-access
# Check if this variable is in var_store.
var = store_vars.get(current_var_or_name, None)
# Also check if variable is partitioned as list.
if var is None:
var = _collect_partitioned_variable(current_var_or_name, store_vars)
if var is not None:
# If 1 to 1 mapping was provided, find variable in the checkpoint.
if tensor_name_in_ckpt not in variable_map:
raise ValueError("Tensor %s is not found in %s checkpoint %s" % (
tensor_name_in_ckpt, ckpt_dir_or_file, variable_map
))
if _is_variable(var):
# Additional at-call-time checks.
if not var.get_shape().is_compatible_with(
variable_map[tensor_name_in_ckpt]):
raise ValueError(
"Shape of variable %s (%s) doesn't match with shape of "
"tensor %s (%s) from checkpoint reader." % (
var.name, str(var.get_shape()),
tensor_name_in_ckpt, str(variable_map[tensor_name_in_ckpt])
))
var_name = var.name
else:
var_name = ",".join([v.name for v in var])
_set_variable_or_list_initializer(var, ckpt_file, tensor_name_in_ckpt)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, tensor_name_in_ckpt)
else:
scopes = ""
# TODO(vihanjain): Support list of 'current_var_or_name' here.
if "/" in current_var_or_name:
scopes = current_var_or_name[:current_var_or_name.rindex("/")]
if not tensor_name_in_ckpt.endswith("/"):
raise ValueError(
"Assignment map with scope only name {} should map to scope only "
"{}. Should be 'scope/': 'other_scope/'.".format(
scopes, tensor_name_in_ckpt))
# If scope to scope mapping was provided, find all variables in the scope
# and create variable to variable mapping.
scope_variables = set()
for var_name in store_vars:
if not scopes or var_name.startswith(scopes + "/"):
# Consume /part_ if partitioned variable.
if "/part_" in var_name:
var_name = var_name[:var_name.index("/part_")]
scope_variables.add(var_name)
for var_name in sorted(scope_variables):
# Lookup name with specified prefix and suffix from current variable.
# If tensor_name given is '/' (root), don't use it for full name.
full_tensor_name = var_name[len(scopes):]
if current_var_or_name != "/":
full_tensor_name = full_tensor_name[1:]
if tensor_name_in_ckpt != "/":
full_tensor_name = tensor_name_in_ckpt + full_tensor_name
# Remove trailing '/', if any, in the full_tensor_name
if full_tensor_name.endswith("/"):
full_tensor_name = full_tensor_name[:-1]
if full_tensor_name not in variable_map:
raise ValueError(
"Tensor %s (%s in %s) is not found in %s checkpoint" % (
full_tensor_name, var_name[len(scopes) + 1:],
tensor_name_in_ckpt, ckpt_dir_or_file
))
var = store_vars.get(var_name, None)
if var is None:
var = _collect_partitioned_variable(var_name, store_vars)
_set_variable_or_list_initializer(var, ckpt_file, full_tensor_name)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, full_tensor_name)
def _get_checkpoint_filename(ckpt_dir_or_file):
"""Returns checkpoint filename given directory or specific checkpoint file."""
if gfile.IsDirectory(ckpt_dir_or_file):
return checkpoint_management.latest_checkpoint(ckpt_dir_or_file)
return ckpt_dir_or_file
def _set_checkpoint_initializer(variable,
ckpt_file,
tensor_name,
slice_spec,
name="checkpoint_initializer",
# +++ DIT: default write_version=saver_pb2.SaverDef.DIT
write_version=saver_pb2.SaverDef.DIT):
# --- DIT: default write_version=saver_pb2.SaverDef.DIT
"""Overrides given variable's initialization op.
Sets variable initializer to assign op that initializes variable from tensor's
value in the checkpoint.
Args:
variable: `tf.Variable` object.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
slice_spec: Slice specification for loading partitioned tensors.
    name: Name of the operation.
    write_version: `SaverDef` checkpoint format version; selects between the
      `restore_v2` and `restore_dit` restore ops.
"""
base_type = variable.dtype.base_dtype
# Do not colocate with variable since RestoreV2 op only runs on CPU and
# colocation will force variable (and other ops that colocate with variable)
# to be on CPU as well. It is okay to place the variable's initializer op on
# CPU since it will only be run once at the start.
with ops.device(variable.device), ops.device("/cpu:0"):
#restore_op = io_ops.restore_v2(
# ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
# +++ DIT: check for restore_dit
    if write_version in (saver_pb2.SaverDef.V1, saver_pb2.SaverDef.V2):
      restore_op = io_ops.restore_v2(
          ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
    elif write_version == saver_pb2.SaverDef.DIT:
      restore_op = io_ops.restore_dit(
          ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
    else:
      raise RuntimeError("Unexpected write_version: %s" % write_version)
# --- DIT: check for restore_dit
names_to_saveables = saveable_object_util.op_list_to_dict([variable])
saveable_objects = []
for name, op in names_to_saveables.items():
for s in saveable_object_util.saveable_objects_for_op(op, name):
saveable_objects.append(s)
assert len(saveable_objects) == 1 # Should be only one variable.
init_op = saveable_objects[0].restore([restore_op], restored_shapes=None)
# pylint:disable=protected-access
variable._initializer_op = init_op
restore_op.set_shape(variable.shape)
variable._initial_value = restore_op
# pylint:enable=protected-access
def _set_variable_or_list_initializer(variable_or_list, ckpt_file,
tensor_name):
"""Overrides initialization op of given variable or list of variables.
Calls `_set_checkpoint_initializer` for each variable in the given list of
variables.
Args:
variable_or_list: `tf.Variable` object or a list of `tf.Variable` objects.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
Raises:
ValueError: if all objects in `variable_or_list` are not partitions of the
same large variable.
"""
if isinstance(variable_or_list, (list, tuple)):
# A set of slices.
slice_name = None
for v in variable_or_list:
slice_info = v._save_slice_info # pylint:disable=protected-access
if slice_name is None:
slice_name = slice_info.full_name
elif slice_name != slice_info.full_name:
raise ValueError("Slices must all be from the same tensor: %s != %s" %
(slice_name, slice_info.full_name))
_set_checkpoint_initializer(v, ckpt_file, tensor_name, slice_info.spec)
else:
_set_checkpoint_initializer(variable_or_list, ckpt_file, tensor_name, "")
def _is_variable(x):
return (isinstance(x, variables.Variable) or
resource_variable_ops.is_resource_variable(x))
def _collect_partitioned_variable(name, all_vars):
"""Returns list of `tf.Variable` that comprise the partitioned variable."""
if name + "/part_0" in all_vars:
var = []
i = 0
while name + "/part_%d" % i in all_vars:
var.append(all_vars[name + "/part_%d" % i])
i += 1
return var
return None
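# Sanity sketch of the '/part_%d' naming convention handled above; pure dict
# logic (no TensorFlow state), so it is safe to run standalone:
if __name__ == "__main__":
  _fake_vars = {"w/part_0": "v0", "w/part_1": "v1", "b": "v2"}
  assert _collect_partitioned_variable("w", _fake_vars) == ["v0", "v1"]
  assert _collect_partitioned_variable("b", _fake_vars) is None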
|
@tf_export("train.checkpoints_iterator")
def checkpoints_iterator(checkpoint_dir,
min_interval_secs=0,
timeout=None,
timeout_fn=None):
"""Continuously yield new checkpoint files as they appear.
The iterator only checks for new checkpoints when control flow has been
reverted to it. This means it can miss checkpoints if your code takes longer
to run between iterations than `min_interval_secs` or the interval at which
new checkpoints are written.
The `timeout` argument is the maximum number of seconds to block waiting for
a new checkpoint. It is used in combination with the `timeout_fn` as
follows:
* If the timeout expires and no `timeout_fn` was specified, the iterator
stops yielding.
* If a `timeout_fn` was specified, that function is called and if it returns
a true boolean value the iterator stops yielding.
* If the function returns a false boolean value then the iterator resumes the
wait for new checkpoints. At this point the timeout logic applies again.
This behavior gives control to callers on what to do if checkpoints do not
come fast enough or stop being generated. For example, if callers have a way
to detect that the training has stopped and know that no new checkpoints
will be generated, they can provide a `timeout_fn` that returns `True` when
the training has stopped. If they know that the training is still going on
they return `False` instead.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
min_interval_secs: The minimum number of seconds between yielding
checkpoints.
timeout: The maximum number of seconds to wait between checkpoints. If left
as `None`, then the process will wait indefinitely.
timeout_fn: Optional function to call after a timeout. If the function
returns True, then it means that no new checkpoints will be generated and
the iterator will exit. The function is called with no arguments.
Yields:
String paths to latest checkpoint files as they arrive.
"""
checkpoint_path = None
while True:
new_checkpoint_path = wait_for_new_checkpoint(
checkpoint_dir, checkpoint_path, timeout=timeout)
if new_checkpoint_path is None:
if not timeout_fn:
# timed out
logging.info("Timed-out waiting for a checkpoint.")
return
if timeout_fn():
# The timeout_fn indicated that we are truly done.
return
else:
# The timeout_fn indicated that more checkpoints may come.
continue
start = time.time()
checkpoint_path = new_checkpoint_path
yield checkpoint_path
time_to_next_eval = start + min_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
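# Usage sketch (hypothetical directory and hooks): evaluate each new
# checkpoint; after every 10-minute idle timeout, `timeout_fn` is asked
# whether to keep waiting (False) or stop (True):
#
#   for ckpt in checkpoints_iterator('/tmp/train_dir',
#                                    min_interval_secs=30,
#                                    timeout=600,
#                                    timeout_fn=lambda: training_done()):
#     evaluate(ckpt)  # `training_done` and `evaluate` are placeholders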
| 138
| 201
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to work with checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import six
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import py_checkpoint_reader
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"load_checkpoint", "load_variable", "list_variables",
"checkpoints_iterator", "init_from_checkpoint"
]
@tf_export("train.load_checkpoint")
def load_checkpoint(ckpt_dir_or_file):
"""Returns `CheckpointReader` for checkpoint found in `ckpt_dir_or_file`.
If `ckpt_dir_or_file` resolves to a directory with multiple checkpoints,
  a reader for the latest checkpoint is returned.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint
file.
Returns:
`CheckpointReader` object.
Raises:
ValueError: If `ckpt_dir_or_file` resolves to a directory with no
checkpoints.
"""
filename = _get_checkpoint_filename(ckpt_dir_or_file)
if filename is None:
raise ValueError("Couldn't find 'checkpoint' file or checkpoints in "
"given directory %s" % ckpt_dir_or_file)
return py_checkpoint_reader.NewCheckpointReader(filename)
@tf_export("train.load_variable")
def load_variable(ckpt_dir_or_file, name):
"""Returns the tensor value of the given variable in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
name: Name of the variable to return.
Returns:
A numpy `ndarray` with a copy of the value of this variable.
"""
# TODO(b/29227106): Fix this in the right place and remove this.
if name.endswith(":0"):
name = name[:-2]
reader = load_checkpoint(ckpt_dir_or_file)
return reader.get_tensor(name)
@tf_export("train.list_variables")
def list_variables(ckpt_dir_or_file):
"""Returns list of all variables in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
Returns:
List of tuples `(name, shape)`.
"""
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
names = sorted(variable_map.keys())
result = []
for name in names:
result.append((name, variable_map[name]))
return result
def wait_for_new_checkpoint(checkpoint_dir,
last_checkpoint=None,
seconds_to_sleep=1,
timeout=None):
"""Waits until a new checkpoint file is found.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
last_checkpoint: The last checkpoint path used or `None` if we're expecting
a checkpoint for the first time.
seconds_to_sleep: The number of seconds to sleep for before looking for a
new checkpoint.
timeout: The maximum number of seconds to wait. If left as `None`, then the
process will wait indefinitely.
Returns:
a new checkpoint path, or None if the timeout was reached.
"""
logging.info("Waiting for new checkpoint at %s", checkpoint_dir)
stop_time = time.time() + timeout if timeout is not None else None
while True:
checkpoint_path = checkpoint_management.latest_checkpoint(checkpoint_dir)
if checkpoint_path is None or checkpoint_path == last_checkpoint:
if stop_time is not None and time.time() + seconds_to_sleep > stop_time:
return None
time.sleep(seconds_to_sleep)
else:
logging.info("Found new checkpoint at %s", checkpoint_path)
return checkpoint_path
@tf_export("train.checkpoints_iterator")
def checkpoints_iterator(checkpoint_dir,
min_interval_secs=0,
timeout=None,
timeout_fn=None):
"""Continuously yield new checkpoint files as they appear.
The iterator only checks for new checkpoints when control flow has been
reverted to it. This means it can miss checkpoints if your code takes longer
to run between iterations than `min_interval_secs` or the interval at which
new checkpoints are written.
The `timeout` argument is the maximum number of seconds to block waiting for
a new checkpoint. It is used in combination with the `timeout_fn` as
follows:
* If the timeout expires and no `timeout_fn` was specified, the iterator
stops yielding.
* If a `timeout_fn` was specified, that function is called and if it returns
a true boolean value the iterator stops yielding.
* If the function returns a false boolean value then the iterator resumes the
wait for new checkpoints. At this point the timeout logic applies again.
This behavior gives control to callers on what to do if checkpoints do not
come fast enough or stop being generated. For example, if callers have a way
to detect that the training has stopped and know that no new checkpoints
will be generated, they can provide a `timeout_fn` that returns `True` when
the training has stopped. If they know that the training is still going on
they return `False` instead.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
min_interval_secs: The minimum number of seconds between yielding
checkpoints.
timeout: The maximum number of seconds to wait between checkpoints. If left
as `None`, then the process will wait indefinitely.
timeout_fn: Optional function to call after a timeout. If the function
returns True, then it means that no new checkpoints will be generated and
the iterator will exit. The function is called with no arguments.
Yields:
String paths to latest checkpoint files as they arrive.
"""
checkpoint_path = None
while True:
new_checkpoint_path = wait_for_new_checkpoint(
checkpoint_dir, checkpoint_path, timeout=timeout)
if new_checkpoint_path is None:
if not timeout_fn:
# timed out
logging.info("Timed-out waiting for a checkpoint.")
return
if timeout_fn():
# The timeout_fn indicated that we are truly done.
return
else:
# The timeout_fn indicated that more checkpoints may come.
continue
start = time.time()
checkpoint_path = new_checkpoint_path
yield checkpoint_path
time_to_next_eval = start + min_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
@tf_export(v1=["train.init_from_checkpoint"])
def init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""Replaces `tf.Variable` initializers so they load from a checkpoint file.
Values are not loaded immediately, but when the initializer is run
(typically by running a `tf.compat.v1.global_variables_initializer` op).
Note: This overrides default initialization ops of specified variables and
redefines dtype.
  The assignment map supports the following syntax:
* `'checkpoint_scope_name/': 'scope_name/'` - will load all variables in
current `scope_name` from `checkpoint_scope_name` with matching tensor
names.
* `'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` -
will initialize `scope_name/variable_name` variable
from `checkpoint_scope_name/some_other_variable`.
* `'scope_variable_name': variable` - will initialize given `tf.Variable`
object with tensor 'scope_variable_name' from the checkpoint.
* `'scope_variable_name': list(variable)` - will initialize list of
partitioned variables with tensor 'scope_variable_name' from the checkpoint.
* `'/': 'scope_name/'` - will load all variables in current `scope_name` from
checkpoint's root (e.g. no scope).
Supports loading into partitioned variables, which are represented as
`'<variable>/part_<part #>'`.
Example:
```python
# Say, '/tmp/model.ckpt' has the following tensors:
# -- name='old_scope_1/var1', shape=[20, 2]
# -- name='old_scope_1/var2', shape=[50, 4]
# -- name='old_scope_2/var3', shape=[100, 100]
# Create new model's variables
with tf.compat.v1.variable_scope('new_scope_1'):
var1 = tf.compat.v1.get_variable('var1', shape=[20, 2],
initializer=tf.compat.v1.zeros_initializer())
with tf.compat.v1.variable_scope('new_scope_2'):
var2 = tf.compat.v1.get_variable('var2', shape=[50, 4],
initializer=tf.compat.v1.zeros_initializer())
# Partition into 5 variables along the first axis.
var3 = tf.compat.v1.get_variable(name='var3', shape=[100, 100],
initializer=tf.compat.v1.zeros_initializer(),
partitioner=lambda shape, dtype: [5, 1])
# Initialize all variables in `new_scope_1` from `old_scope_1`.
init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/': 'new_scope_1'})
# Use names to specify which variables to initialize from checkpoint.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': 'new_scope_1/var1',
'old_scope_1/var2': 'new_scope_2/var2'})
# Or use tf.Variable objects to identify what to initialize.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': var1,
'old_scope_1/var2': var2})
# Initialize partitioned variables using variable's name
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': 'new_scope_2/var3'})
# Or specify the list of tf.Variable objects.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': var3._get_variable_list()})
```
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
assignment_map: Dict, where keys are names of the variables in the
checkpoint and values are current variables or names of current variables
(in default graph).
Raises:
    ValueError: If a variable is missing from the current graph, or if the
      checkpoint or a tensor in it cannot be found.
"""
init_from_checkpoint_fn = lambda _: _init_from_checkpoint(
ckpt_dir_or_file, assignment_map)
if distribution_strategy_context.get_cross_replica_context():
init_from_checkpoint_fn(None)
else:
distribution_strategy_context.get_replica_context().merge_call(
init_from_checkpoint_fn)
def _init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""See `init_from_checkpoint` for documentation."""
ckpt_file = _get_checkpoint_filename(ckpt_dir_or_file)
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
for tensor_name_in_ckpt, current_var_or_name in sorted(
six.iteritems(assignment_map)):
var = None
# Check if this is Variable object or list of Variable objects (in case of
# partitioned variables).
if _is_variable(current_var_or_name) or (
isinstance(current_var_or_name, list)
and all(_is_variable(v) for v in current_var_or_name)):
var = current_var_or_name
else:
store_vars = vs._get_default_variable_store()._vars # pylint:disable=protected-access
# Check if this variable is in var_store.
var = store_vars.get(current_var_or_name, None)
# Also check if variable is partitioned as list.
if var is None:
var = _collect_partitioned_variable(current_var_or_name, store_vars)
if var is not None:
# If 1 to 1 mapping was provided, find variable in the checkpoint.
if tensor_name_in_ckpt not in variable_map:
raise ValueError("Tensor %s is not found in %s checkpoint %s" % (
tensor_name_in_ckpt, ckpt_dir_or_file, variable_map
))
if _is_variable(var):
# Additional at-call-time checks.
if not var.get_shape().is_compatible_with(
variable_map[tensor_name_in_ckpt]):
raise ValueError(
"Shape of variable %s (%s) doesn't match with shape of "
"tensor %s (%s) from checkpoint reader." % (
var.name, str(var.get_shape()),
tensor_name_in_ckpt, str(variable_map[tensor_name_in_ckpt])
))
var_name = var.name
else:
var_name = ",".join([v.name for v in var])
_set_variable_or_list_initializer(var, ckpt_file, tensor_name_in_ckpt)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, tensor_name_in_ckpt)
else:
scopes = ""
# TODO(vihanjain): Support list of 'current_var_or_name' here.
if "/" in current_var_or_name:
scopes = current_var_or_name[:current_var_or_name.rindex("/")]
if not tensor_name_in_ckpt.endswith("/"):
raise ValueError(
"Assignment map with scope only name {} should map to scope only "
"{}. Should be 'scope/': 'other_scope/'.".format(
scopes, tensor_name_in_ckpt))
# If scope to scope mapping was provided, find all variables in the scope
# and create variable to variable mapping.
scope_variables = set()
for var_name in store_vars:
if not scopes or var_name.startswith(scopes + "/"):
# Consume /part_ if partitioned variable.
if "/part_" in var_name:
var_name = var_name[:var_name.index("/part_")]
scope_variables.add(var_name)
for var_name in sorted(scope_variables):
# Lookup name with specified prefix and suffix from current variable.
# If tensor_name given is '/' (root), don't use it for full name.
full_tensor_name = var_name[len(scopes):]
if current_var_or_name != "/":
full_tensor_name = full_tensor_name[1:]
if tensor_name_in_ckpt != "/":
full_tensor_name = tensor_name_in_ckpt + full_tensor_name
# Remove trailing '/', if any, in the full_tensor_name
if full_tensor_name.endswith("/"):
full_tensor_name = full_tensor_name[:-1]
if full_tensor_name not in variable_map:
raise ValueError(
"Tensor %s (%s in %s) is not found in %s checkpoint" % (
full_tensor_name, var_name[len(scopes) + 1:],
tensor_name_in_ckpt, ckpt_dir_or_file
))
var = store_vars.get(var_name, None)
if var is None:
var = _collect_partitioned_variable(var_name, store_vars)
_set_variable_or_list_initializer(var, ckpt_file, full_tensor_name)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, full_tensor_name)
def _get_checkpoint_filename(ckpt_dir_or_file):
"""Returns checkpoint filename given directory or specific checkpoint file."""
if gfile.IsDirectory(ckpt_dir_or_file):
return checkpoint_management.latest_checkpoint(ckpt_dir_or_file)
return ckpt_dir_or_file
def _set_checkpoint_initializer(variable,
ckpt_file,
tensor_name,
slice_spec,
name="checkpoint_initializer",
# +++ DIT: default write_version=saver_pb2.SaverDef.DIT
write_version=saver_pb2.SaverDef.DIT):
# --- DIT: default write_version=saver_pb2.SaverDef.DIT
"""Overrides given variable's initialization op.
Sets variable initializer to assign op that initializes variable from tensor's
value in the checkpoint.
Args:
variable: `tf.Variable` object.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
slice_spec: Slice specification for loading partitioned tensors.
    name: Name of the operation.
    write_version: `SaverDef` checkpoint format version; selects between the
      `restore_v2` and `restore_dit` restore ops.
"""
base_type = variable.dtype.base_dtype
# Do not colocate with variable since RestoreV2 op only runs on CPU and
# colocation will force variable (and other ops that colocate with variable)
# to be on CPU as well. It is okay to place the variable's initializer op on
# CPU since it will only be run once at the start.
with ops.device(variable.device), ops.device("/cpu:0"):
#restore_op = io_ops.restore_v2(
# ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
# +++ DIT: check for restore_dit
    if write_version in (saver_pb2.SaverDef.V1, saver_pb2.SaverDef.V2):
      restore_op = io_ops.restore_v2(
          ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
    elif write_version == saver_pb2.SaverDef.DIT:
      restore_op = io_ops.restore_dit(
          ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
    else:
      raise RuntimeError("Unexpected write_version: %s" % write_version)
# --- DIT: check for restore_dit
names_to_saveables = saveable_object_util.op_list_to_dict([variable])
saveable_objects = []
for name, op in names_to_saveables.items():
for s in saveable_object_util.saveable_objects_for_op(op, name):
saveable_objects.append(s)
assert len(saveable_objects) == 1 # Should be only one variable.
init_op = saveable_objects[0].restore([restore_op], restored_shapes=None)
# pylint:disable=protected-access
variable._initializer_op = init_op
restore_op.set_shape(variable.shape)
variable._initial_value = restore_op
# pylint:enable=protected-access
def _set_variable_or_list_initializer(variable_or_list, ckpt_file,
tensor_name):
"""Overrides initialization op of given variable or list of variables.
Calls `_set_checkpoint_initializer` for each variable in the given list of
variables.
Args:
variable_or_list: `tf.Variable` object or a list of `tf.Variable` objects.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
Raises:
ValueError: if all objects in `variable_or_list` are not partitions of the
same large variable.
"""
if isinstance(variable_or_list, (list, tuple)):
# A set of slices.
slice_name = None
for v in variable_or_list:
slice_info = v._save_slice_info # pylint:disable=protected-access
if slice_name is None:
slice_name = slice_info.full_name
elif slice_name != slice_info.full_name:
raise ValueError("Slices must all be from the same tensor: %s != %s" %
(slice_name, slice_info.full_name))
_set_checkpoint_initializer(v, ckpt_file, tensor_name, slice_info.spec)
else:
_set_checkpoint_initializer(variable_or_list, ckpt_file, tensor_name, "")
def _is_variable(x):
return (isinstance(x, variables.Variable) or
resource_variable_ops.is_resource_variable(x))
def _collect_partitioned_variable(name, all_vars):
"""Returns list of `tf.Variable` that comprise the partitioned variable."""
if name + "/part_0" in all_vars:
var = []
i = 0
while name + "/part_%d" % i in all_vars:
var.append(all_vars[name + "/part_%d" % i])
i += 1
return var
return None
|
read_const
|
Look up constant number *index* inside the environment body.
A borrowed reference is returned.
The returned LLVM value may be NULL at runtime, which indicates an
error occurred at runtime.
|
from collections import namedtuple
import contextlib
import pickle
import hashlib
from llvmlite import ir
from llvmlite.llvmpy.core import Type, Constant
import llvmlite.llvmpy.core as lc
import ctypes
from numba import _helperlib
from numba.core import (
types, utils, config, lowering, cgutils, imputils, serialize,
)
PY_UNICODE_1BYTE_KIND = _helperlib.py_unicode_1byte_kind
PY_UNICODE_2BYTE_KIND = _helperlib.py_unicode_2byte_kind
PY_UNICODE_4BYTE_KIND = _helperlib.py_unicode_4byte_kind
PY_UNICODE_WCHAR_KIND = _helperlib.py_unicode_wchar_kind
class _Registry(object):
def __init__(self):
self.functions = {}
def register(self, typeclass):
assert issubclass(typeclass, types.Type)
def decorator(func):
if typeclass in self.functions:
raise KeyError("duplicate registration for %s" % (typeclass,))
self.functions[typeclass] = func
return func
return decorator
def lookup(self, typeclass, default=None):
assert issubclass(typeclass, types.Type)
for cls in typeclass.__mro__:
func = self.functions.get(cls)
if func is not None:
return func
return default
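# Short runnable sketch of the MRO-based dispatch above (relies on the numba
# type hierarchy, where types.Integer subclasses types.Number); guarded so it
# only runs when this module is executed directly:
if __name__ == "__main__":
    _demo = _Registry()

    @_demo.register(types.Number)
    def _box_number(typ, val, c):  # placeholder handler body
        return None

    # lookup walks __mro__, so an Integer query falls back to Number.
    assert _demo.lookup(types.Integer) is _box_number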
# Registries of boxing / unboxing implementations
_boxers = _Registry()
_unboxers = _Registry()
_reflectors = _Registry()
box = _boxers.register
unbox = _unboxers.register
reflect = _reflectors.register
class _BoxContext(namedtuple("_BoxContext",
("context", "builder", "pyapi", "env_manager"))):
"""
The facilities required by boxing implementations.
"""
__slots__ = ()
def box(self, typ, val):
return self.pyapi.from_native_value(typ, val, self.env_manager)
class _UnboxContext(namedtuple("_UnboxContext",
("context", "builder", "pyapi"))):
"""
The facilities required by unboxing implementations.
"""
__slots__ = ()
def unbox(self, typ, obj):
return self.pyapi.to_native_value(typ, obj)
class _ReflectContext(namedtuple("_ReflectContext",
("context", "builder", "pyapi", "env_manager",
"is_error"))):
"""
The facilities required by reflection implementations.
"""
__slots__ = ()
# XXX the error bit is currently unused by consumers (e.g. PyCallWrapper)
def set_error(self):
self.builder.store(self.is_error, cgutils.true_bit)
def box(self, typ, val):
return self.pyapi.from_native_value(typ, val, self.env_manager)
def reflect(self, typ, val):
return self.pyapi.reflect_native_value(typ, val, self.env_manager)
class NativeValue(object):
"""
Encapsulate the result of converting a Python object to a native value,
recording whether the conversion was successful and how to cleanup.
"""
def __init__(self, value, is_error=None, cleanup=None):
self.value = value
self.is_error = is_error if is_error is not None else cgutils.false_bit
self.cleanup = cleanup
class EnvironmentManager(object):
def __init__(self, pyapi, env, env_body, env_ptr):
assert isinstance(env, lowering.Environment)
self.pyapi = pyapi
self.env = env
self.env_body = env_body
self.env_ptr = env_ptr
def add_const(self, const):
"""
Add a constant to the environment, return its index.
"""
# All constants are frozen inside the environment
if isinstance(const, str):
const = utils.intern(const)
for index, val in enumerate(self.env.consts):
if val is const:
break
else:
index = len(self.env.consts)
self.env.consts.append(const)
return index
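    # Behavior note: constants are deduplicated by identity, and str
    # constants are interned first, so equal strings share one slot:
    #   idx0 = mgr.add_const("pi"); idx1 = mgr.add_const("pi")
    #   assert idx0 == idx1        # (`mgr` is a hypothetical manager)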
# MASKED: read_const function (lines 130-155)
_IteratorLoop = namedtuple('_IteratorLoop', ('value', 'do_break'))
class PythonAPI(object):
"""
Code generation facilities to call into the CPython C API (and related
helpers).
"""
def __init__(self, context, builder):
"""
Note: Maybe called multiple times when lowering a function
"""
from numba.core import boxing
self.context = context
self.builder = builder
self.module = builder.basic_block.function.module
# A unique mapping of serialized objects in this module
try:
self.module.__serialized
except AttributeError:
self.module.__serialized = {}
# Initialize types
self.pyobj = self.context.get_argument_type(types.pyobject)
self.pyobjptr = self.pyobj.as_pointer()
self.voidptr = Type.pointer(Type.int(8))
self.long = Type.int(ctypes.sizeof(ctypes.c_long) * 8)
self.ulong = self.long
self.longlong = Type.int(ctypes.sizeof(ctypes.c_ulonglong) * 8)
self.ulonglong = self.longlong
self.double = Type.double()
self.py_ssize_t = self.context.get_value_type(types.intp)
self.cstring = Type.pointer(Type.int(8))
self.gil_state = Type.int(_helperlib.py_gil_state_size * 8)
self.py_buffer_t = ir.ArrayType(ir.IntType(8), _helperlib.py_buffer_size)
self.py_hash_t = self.py_ssize_t
self.py_unicode_1byte_kind = _helperlib.py_unicode_1byte_kind
self.py_unicode_2byte_kind = _helperlib.py_unicode_2byte_kind
self.py_unicode_4byte_kind = _helperlib.py_unicode_4byte_kind
self.py_unicode_wchar_kind = _helperlib.py_unicode_wchar_kind
def get_env_manager(self, env, env_body, env_ptr):
return EnvironmentManager(self, env, env_body, env_ptr)
def emit_environment_sentry(self, envptr, return_pyobject=False,
debug_msg=''):
"""Emits LLVM code to ensure the `envptr` is not NULL
"""
is_null = cgutils.is_null(self.builder, envptr)
with cgutils.if_unlikely(self.builder, is_null):
if return_pyobject:
fnty = self.builder.function.type.pointee
assert fnty.return_type == self.pyobj
self.err_set_string(
"PyExc_RuntimeError", f"missing Environment: {debug_msg}",
)
self.builder.ret(self.get_null_object())
else:
self.context.call_conv.return_user_exc(
self.builder, RuntimeError,
(f"missing Environment: {debug_msg}",),
)
# ------ Python API -----
#
# Basic object API
#
def incref(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="Py_IncRef")
self.builder.call(fn, [obj])
def decref(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="Py_DecRef")
self.builder.call(fn, [obj])
def get_type(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="numba_py_type")
return self.builder.call(fn, [obj])
#
# Argument unpacking
#
def parse_tuple_and_keywords(self, args, kws, fmt, keywords, *objs):
charptr = Type.pointer(Type.int(8))
charptrary = Type.pointer(charptr)
argtypes = [self.pyobj, self.pyobj, charptr, charptrary]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_ParseTupleAndKeywords")
return self.builder.call(fn, [args, kws, fmt, keywords] + list(objs))
def parse_tuple(self, args, fmt, *objs):
charptr = Type.pointer(Type.int(8))
argtypes = [self.pyobj, charptr]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_ParseTuple")
return self.builder.call(fn, [args, fmt] + list(objs))
def unpack_tuple(self, args, name, n_min, n_max, *objs):
charptr = Type.pointer(Type.int(8))
argtypes = [self.pyobj, charptr, self.py_ssize_t, self.py_ssize_t]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_UnpackTuple")
n_min = Constant.int(self.py_ssize_t, n_min)
n_max = Constant.int(self.py_ssize_t, n_max)
if isinstance(name, str):
name = self.context.insert_const_string(self.builder.module, name)
return self.builder.call(fn, [args, name, n_min, n_max] + list(objs))
#
# Exception and errors
#
def err_occurred(self):
fnty = Type.function(self.pyobj, ())
fn = self._get_function(fnty, name="PyErr_Occurred")
return self.builder.call(fn, ())
def err_clear(self):
fnty = Type.function(Type.void(), ())
fn = self._get_function(fnty, name="PyErr_Clear")
return self.builder.call(fn, ())
def err_set_string(self, exctype, msg):
fnty = Type.function(Type.void(), [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyErr_SetString")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
if isinstance(msg, str):
msg = self.context.insert_const_string(self.module, msg)
return self.builder.call(fn, (exctype, msg))
def err_format(self, exctype, msg, *format_args):
fnty = Type.function(Type.void(), [self.pyobj, self.cstring], var_arg=True)
fn = self._get_function(fnty, name="PyErr_Format")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
if isinstance(msg, str):
msg = self.context.insert_const_string(self.module, msg)
return self.builder.call(fn, (exctype, msg) + tuple(format_args))
def raise_object(self, exc=None):
"""
Raise an arbitrary exception (type or value or (type, args)
or None - if reraising). A reference to the argument is consumed.
"""
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="numba_do_raise")
if exc is None:
exc = self.make_none()
return self.builder.call(fn, (exc,))
def err_set_object(self, exctype, excval):
fnty = Type.function(Type.void(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyErr_SetObject")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
return self.builder.call(fn, (exctype, excval))
def err_set_none(self, exctype):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="PyErr_SetNone")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
return self.builder.call(fn, (exctype,))
def err_write_unraisable(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="PyErr_WriteUnraisable")
return self.builder.call(fn, (obj,))
def err_fetch(self, pty, pval, ptb):
fnty = Type.function(Type.void(), [self.pyobjptr] * 3)
fn = self._get_function(fnty, name="PyErr_Fetch")
return self.builder.call(fn, (pty, pval, ptb))
def err_restore(self, ty, val, tb):
fnty = Type.function(Type.void(), [self.pyobj] * 3)
fn = self._get_function(fnty, name="PyErr_Restore")
return self.builder.call(fn, (ty, val, tb))
@contextlib.contextmanager
def err_push(self, keep_new=False):
"""
Temporarily push the current error indicator while the code
block is executed. If *keep_new* is True and the code block
raises a new error, the new error is kept, otherwise the old
error indicator is restored at the end of the block.
"""
pty, pval, ptb = [cgutils.alloca_once(self.builder, self.pyobj)
for i in range(3)]
self.err_fetch(pty, pval, ptb)
yield
ty = self.builder.load(pty)
val = self.builder.load(pval)
tb = self.builder.load(ptb)
if keep_new:
new_error = cgutils.is_not_null(self.builder, self.err_occurred())
with self.builder.if_else(new_error, likely=False) as (if_error, if_ok):
with if_error:
# Code block raised an error, keep it
self.decref(ty)
self.decref(val)
self.decref(tb)
with if_ok:
# Restore previous error
self.err_restore(ty, val, tb)
else:
self.err_restore(ty, val, tb)
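    # Usage sketch (hypothetical): wrap cleanup codegen so a failure in the
    # cleanup path cannot silently clobber an in-flight exception:
    #   with pyapi.err_push(keep_new=True):
    #       ... emit cleanup calls that may themselves set an error ...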
def get_c_object(self, name):
"""
Get a Python object through its C-accessible *name*
(e.g. "PyExc_ValueError"). The underlying variable must be
a `PyObject *`, and the value of that pointer is returned.
"""
# A LLVM global variable is implicitly a pointer to the declared
# type, so fix up by using pyobj.pointee.
return self.context.get_c_value(self.builder, self.pyobj.pointee, name,
dllimport=True)
def raise_missing_global_error(self, name):
msg = "global name '%s' is not defined" % name
cstr = self.context.insert_const_string(self.module, msg)
self.err_set_string("PyExc_NameError", cstr)
def raise_missing_name_error(self, name):
msg = "name '%s' is not defined" % name
cstr = self.context.insert_const_string(self.module, msg)
self.err_set_string("PyExc_NameError", cstr)
def fatal_error(self, msg):
fnty = Type.function(Type.void(), [self.cstring])
fn = self._get_function(fnty, name="Py_FatalError")
fn.attributes.add("noreturn")
cstr = self.context.insert_const_string(self.module, msg)
self.builder.call(fn, (cstr,))
#
# Concrete dict API
#
def dict_getitem_string(self, dic, name):
"""Lookup name inside dict
Returns a borrowed reference
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyDict_GetItemString")
cstr = self.context.insert_const_string(self.module, name)
return self.builder.call(fn, [dic, cstr])
def dict_getitem(self, dic, name):
"""Lookup name inside dict
Returns a borrowed reference
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyDict_GetItem")
return self.builder.call(fn, [dic, name])
def dict_new(self, presize=0):
if presize == 0:
fnty = Type.function(self.pyobj, ())
fn = self._get_function(fnty, name="PyDict_New")
return self.builder.call(fn, ())
else:
fnty = Type.function(self.pyobj, [self.py_ssize_t])
fn = self._get_function(fnty, name="_PyDict_NewPresized")
return self.builder.call(fn,
[Constant.int(self.py_ssize_t, presize)])
def dict_setitem(self, dictobj, nameobj, valobj):
fnty = Type.function(Type.int(), (self.pyobj, self.pyobj,
self.pyobj))
fn = self._get_function(fnty, name="PyDict_SetItem")
return self.builder.call(fn, (dictobj, nameobj, valobj))
def dict_setitem_string(self, dictobj, name, valobj):
fnty = Type.function(Type.int(), (self.pyobj, self.cstring,
self.pyobj))
fn = self._get_function(fnty, name="PyDict_SetItemString")
cstr = self.context.insert_const_string(self.module, name)
return self.builder.call(fn, (dictobj, cstr, valobj))
def dict_pack(self, keyvalues):
"""
Args
-----
keyvalues: iterable of (str, llvm.Value of PyObject*)
"""
dictobj = self.dict_new()
with self.if_object_ok(dictobj):
for k, v in keyvalues:
self.dict_setitem_string(dictobj, k, v)
return dictobj
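    # Usage sketch (hypothetical llvm Values holding PyObject*): pack
    # key/value pairs into a fresh dict and release it once handed off:
    #   d = pyapi.dict_pack([("shape", shape_obj), ("strides", strides_obj)])
    #   ... pass d across the C API boundary ...
    #   pyapi.decref(d)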
#
# Concrete number APIs
#
def float_from_double(self, fval):
fnty = Type.function(self.pyobj, [self.double])
fn = self._get_function(fnty, name="PyFloat_FromDouble")
return self.builder.call(fn, [fval])
def number_as_ssize_t(self, numobj):
fnty = Type.function(self.py_ssize_t, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyNumber_AsSsize_t")
# We don't want any clipping, so pass OverflowError as the 2nd arg
exc_class = self.get_c_object("PyExc_OverflowError")
return self.builder.call(fn, [numobj, exc_class])
def number_long(self, numobj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Long")
return self.builder.call(fn, [numobj])
def long_as_ulonglong(self, numobj):
fnty = Type.function(self.ulonglong, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsUnsignedLongLong")
return self.builder.call(fn, [numobj])
def long_as_longlong(self, numobj):
fnty = Type.function(self.ulonglong, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsLongLong")
return self.builder.call(fn, [numobj])
def long_as_voidptr(self, numobj):
"""
Convert the given Python integer to a void*. This is recommended
over number_as_ssize_t as it isn't affected by signedness.
"""
fnty = Type.function(self.voidptr, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsVoidPtr")
return self.builder.call(fn, [numobj])
def _long_from_native_int(self, ival, func_name, native_int_type,
signed):
fnty = Type.function(self.pyobj, [native_int_type])
fn = self._get_function(fnty, name=func_name)
        resptr = cgutils.alloca_once(self.builder, self.pyobj)
        self.builder.store(self.builder.call(fn, [ival]), resptr)
return self.builder.load(resptr)
def long_from_long(self, ival):
func_name = "PyLong_FromLong"
fnty = Type.function(self.pyobj, [self.long])
fn = self._get_function(fnty, name=func_name)
return self.builder.call(fn, [ival])
def long_from_ulong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromUnsignedLong",
self.long, signed=False)
def long_from_ssize_t(self, ival):
return self._long_from_native_int(ival, "PyLong_FromSsize_t",
self.py_ssize_t, signed=True)
def long_from_longlong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromLongLong",
self.longlong, signed=True)
def long_from_ulonglong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromUnsignedLongLong",
self.ulonglong, signed=False)
def long_from_signed_int(self, ival):
"""
Return a Python integer from any native integer value.
"""
bits = ival.type.width
if bits <= self.long.width:
return self.long_from_long(self.builder.sext(ival, self.long))
elif bits <= self.longlong.width:
return self.long_from_longlong(self.builder.sext(ival, self.longlong))
else:
raise OverflowError("integer too big (%d bits)" % (bits))
def long_from_unsigned_int(self, ival):
"""
Same as long_from_signed_int, but for unsigned values.
"""
bits = ival.type.width
if bits <= self.ulong.width:
return self.long_from_ulong(self.builder.zext(ival, self.ulong))
elif bits <= self.ulonglong.width:
return self.long_from_ulonglong(self.builder.zext(ival, self.ulonglong))
else:
raise OverflowError("integer too big (%d bits)" % (bits))
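    # Note on the pair above: signed values are sign-extended (sext) before
    # calling PyLong_FromLong/FromLongLong, while unsigned values are
    # zero-extended (zext) for the FromUnsignedLong* variants; both raise
    # OverflowError for widths beyond C `long long`.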
def _get_number_operator(self, name):
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyNumber_%s" % name)
return fn
def _call_number_operator(self, name, lhs, rhs, inplace=False):
if inplace:
name = "InPlace" + name
fn = self._get_number_operator(name)
return self.builder.call(fn, [lhs, rhs])
def number_add(self, lhs, rhs, inplace=False):
return self._call_number_operator("Add", lhs, rhs, inplace=inplace)
def number_subtract(self, lhs, rhs, inplace=False):
return self._call_number_operator("Subtract", lhs, rhs, inplace=inplace)
def number_multiply(self, lhs, rhs, inplace=False):
return self._call_number_operator("Multiply", lhs, rhs, inplace=inplace)
def number_truedivide(self, lhs, rhs, inplace=False):
return self._call_number_operator("TrueDivide", lhs, rhs, inplace=inplace)
def number_floordivide(self, lhs, rhs, inplace=False):
return self._call_number_operator("FloorDivide", lhs, rhs, inplace=inplace)
def number_remainder(self, lhs, rhs, inplace=False):
return self._call_number_operator("Remainder", lhs, rhs, inplace=inplace)
def number_matrix_multiply(self, lhs, rhs, inplace=False):
return self._call_number_operator("MatrixMultiply", lhs, rhs, inplace=inplace)
def number_lshift(self, lhs, rhs, inplace=False):
return self._call_number_operator("Lshift", lhs, rhs, inplace=inplace)
def number_rshift(self, lhs, rhs, inplace=False):
return self._call_number_operator("Rshift", lhs, rhs, inplace=inplace)
def number_and(self, lhs, rhs, inplace=False):
return self._call_number_operator("And", lhs, rhs, inplace=inplace)
def number_or(self, lhs, rhs, inplace=False):
return self._call_number_operator("Or", lhs, rhs, inplace=inplace)
def number_xor(self, lhs, rhs, inplace=False):
return self._call_number_operator("Xor", lhs, rhs, inplace=inplace)
def number_power(self, lhs, rhs, inplace=False):
fnty = Type.function(self.pyobj, [self.pyobj] * 3)
fname = "PyNumber_InPlacePower" if inplace else "PyNumber_Power"
fn = self._get_function(fnty, fname)
return self.builder.call(fn, [lhs, rhs, self.borrow_none()])
def number_negative(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Negative")
return self.builder.call(fn, (obj,))
def number_positive(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Positive")
return self.builder.call(fn, (obj,))
def number_float(self, val):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Float")
return self.builder.call(fn, [val])
def number_invert(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Invert")
return self.builder.call(fn, (obj,))
def float_as_double(self, fobj):
fnty = Type.function(self.double, [self.pyobj])
fn = self._get_function(fnty, name="PyFloat_AsDouble")
return self.builder.call(fn, [fobj])
def bool_from_bool(self, bval):
"""
Get a Python bool from a LLVM boolean.
"""
longval = self.builder.zext(bval, self.long)
return self.bool_from_long(longval)
def bool_from_long(self, ival):
fnty = Type.function(self.pyobj, [self.long])
fn = self._get_function(fnty, name="PyBool_FromLong")
return self.builder.call(fn, [ival])
def complex_from_doubles(self, realval, imagval):
fnty = Type.function(self.pyobj, [Type.double(), Type.double()])
fn = self._get_function(fnty, name="PyComplex_FromDoubles")
return self.builder.call(fn, [realval, imagval])
def complex_real_as_double(self, cobj):
fnty = Type.function(Type.double(), [self.pyobj])
fn = self._get_function(fnty, name="PyComplex_RealAsDouble")
return self.builder.call(fn, [cobj])
def complex_imag_as_double(self, cobj):
fnty = Type.function(Type.double(), [self.pyobj])
fn = self._get_function(fnty, name="PyComplex_ImagAsDouble")
return self.builder.call(fn, [cobj])
#
# Concrete slice API
#
def slice_as_ints(self, obj):
"""
Read the members of a slice object holding integers.
Returns an (ok, start, stop, step) tuple where ok is a boolean
success flag and the remaining members are pointer-sized ints.
"""
pstart = cgutils.alloca_once(self.builder, self.py_ssize_t)
pstop = cgutils.alloca_once(self.builder, self.py_ssize_t)
pstep = cgutils.alloca_once(self.builder, self.py_ssize_t)
fnty = Type.function(Type.int(),
[self.pyobj] + [self.py_ssize_t.as_pointer()] * 3)
fn = self._get_function(fnty, name="numba_unpack_slice")
res = self.builder.call(fn, (obj, pstart, pstop, pstep))
start = self.builder.load(pstart)
stop = self.builder.load(pstop)
step = self.builder.load(pstep)
return cgutils.is_null(self.builder, res), start, stop, step
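# Usage sketch (illustrative; `pyapi` and `sliceobj` are hypothetical
# names): callers branch on the success flag before trusting the ints:
#
#   ok, start, stop, step = pyapi.slice_as_ints(sliceobj)
#   with cgutils.if_unlikely(pyapi.builder, pyapi.builder.not_(ok)):
#       ...  # bail out; start/stop/step are unspecified on failure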
#
# List and sequence APIs
#
def sequence_getslice(self, obj, start, stop):
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t,
self.py_ssize_t])
fn = self._get_function(fnty, name="PySequence_GetSlice")
return self.builder.call(fn, (obj, start, stop))
def sequence_tuple(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PySequence_Tuple")
return self.builder.call(fn, [obj])
def list_new(self, szval):
fnty = Type.function(self.pyobj, [self.py_ssize_t])
fn = self._get_function(fnty, name="PyList_New")
return self.builder.call(fn, [szval])
def list_size(self, lst):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PyList_Size")
return self.builder.call(fn, [lst])
def list_append(self, lst, val):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyList_Append")
return self.builder.call(fn, [lst, val])
def list_setitem(self, lst, idx, val):
"""
Warning: Steals reference to ``val``
"""
fnty = Type.function(Type.int(), [self.pyobj, self.py_ssize_t,
self.pyobj])
fn = self._get_function(fnty, name="PyList_SetItem")
return self.builder.call(fn, [lst, idx, val])
def list_getitem(self, lst, idx):
"""
Returns a borrowed reference.
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t])
fn = self._get_function(fnty, name="PyList_GetItem")
if isinstance(idx, int):
idx = self.context.get_constant(types.intp, idx)
return self.builder.call(fn, [lst, idx])
def list_setslice(self, lst, start, stop, obj):
if obj is None:
obj = self.get_null_object()
fnty = Type.function(Type.int(), [self.pyobj, self.py_ssize_t,
self.py_ssize_t, self.pyobj])
fn = self._get_function(fnty, name="PyList_SetSlice")
return self.builder.call(fn, (lst, start, stop, obj))
#
# Concrete tuple API
#
def tuple_getitem(self, tup, idx):
"""
Borrow reference
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t])
fn = self._get_function(fnty, name="PyTuple_GetItem")
idx = self.context.get_constant(types.intp, idx)
return self.builder.call(fn, [tup, idx])
def tuple_pack(self, items):
fnty = Type.function(self.pyobj, [self.py_ssize_t], var_arg=True)
fn = self._get_function(fnty, name="PyTuple_Pack")
n = self.context.get_constant(types.intp, len(items))
args = [n]
args.extend(items)
return self.builder.call(fn, args)
def tuple_size(self, tup):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PyTuple_Size")
return self.builder.call(fn, [tup])
def tuple_new(self, count):
fnty = Type.function(self.pyobj, [Type.int()])
fn = self._get_function(fnty, name='PyTuple_New')
return self.builder.call(fn, [self.context.get_constant(types.int32,
count)])
def tuple_setitem(self, tuple_val, index, item):
"""
Steals a reference to `item`.
"""
fnty = Type.function(Type.int(), [self.pyobj, Type.int(), self.pyobj])
setitem_fn = self._get_function(fnty, name='PyTuple_SetItem')
index = self.context.get_constant(types.int32, index)
self.builder.call(setitem_fn, [tuple_val, index, item])
#
# Concrete set API
#
def set_new(self, iterable=None):
if iterable is None:
iterable = self.get_null_object()
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PySet_New")
return self.builder.call(fn, [iterable])
def set_add(self, set, value):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PySet_Add")
return self.builder.call(fn, [set, value])
def set_clear(self, set):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PySet_Clear")
return self.builder.call(fn, [set])
def set_size(self, set):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PySet_Size")
return self.builder.call(fn, [set])
def set_update(self, set, iterable):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="_PySet_Update")
return self.builder.call(fn, [set, iterable])
def set_next_entry(self, set, posptr, keyptr, hashptr):
fnty = Type.function(Type.int(),
[self.pyobj, self.py_ssize_t.as_pointer(),
self.pyobj.as_pointer(), self.py_hash_t.as_pointer()])
fn = self._get_function(fnty, name="_PySet_NextEntry")
return self.builder.call(fn, (set, posptr, keyptr, hashptr))
@contextlib.contextmanager
def set_iterate(self, set):
builder = self.builder
hashptr = cgutils.alloca_once(builder, self.py_hash_t, name="hashptr")
keyptr = cgutils.alloca_once(builder, self.pyobj, name="keyptr")
posptr = cgutils.alloca_once_value(builder,
ir.Constant(self.py_ssize_t, 0),
name="posptr")
bb_body = builder.append_basic_block("bb_body")
bb_end = builder.append_basic_block("bb_end")
builder.branch(bb_body)
def do_break():
builder.branch(bb_end)
with builder.goto_block(bb_body):
r = self.set_next_entry(set, posptr, keyptr, hashptr)
finished = cgutils.is_null(builder, r)
with builder.if_then(finished, likely=False):
builder.branch(bb_end)
yield _IteratorLoop(builder.load(keyptr), do_break)
builder.branch(bb_body)
builder.position_at_end(bb_end)
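# Usage sketch (illustrative): the yielded _IteratorLoop exposes the
# current entry and an early-exit hook:
#
#   with pyapi.set_iterate(setobj) as loop:
#       item = loop.value        # borrowed PyObject* for this entry
#       ...                      # emit per-entry code here
#       loop.do_break()          # optional early exit to bb_end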
#
# GIL APIs
#
def gil_ensure(self):
"""
Ensure the GIL is acquired.
The returned value must be consumed by gil_release().
"""
gilptrty = Type.pointer(self.gil_state)
fnty = Type.function(Type.void(), [gilptrty])
fn = self._get_function(fnty, "numba_gil_ensure")
gilptr = cgutils.alloca_once(self.builder, self.gil_state)
self.builder.call(fn, [gilptr])
return gilptr
def gil_release(self, gil):
"""
Release the acquired GIL by gil_ensure().
Must be paired with a gil_ensure().
"""
gilptrty = Type.pointer(self.gil_state)
fnty = Type.function(Type.void(), [gilptrty])
fn = self._get_function(fnty, "numba_gil_release")
return self.builder.call(fn, [gil])
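# Pairing sketch (illustrative): every gil_ensure() must feed exactly
# one gil_release(), bracketing any CPython API calls made from
# GIL-less code:
#
#   gilstate = pyapi.gil_ensure()
#   res = pyapi.call_function_objargs(fnobj, [argobj])
#   pyapi.gil_release(gilstate)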
def save_thread(self):
"""
Release the GIL and return the former thread state
(an opaque non-NULL pointer).
"""
fnty = Type.function(self.voidptr, [])
fn = self._get_function(fnty, name="PyEval_SaveThread")
return self.builder.call(fn, [])
def restore_thread(self, thread_state):
"""
Restore the given thread state by reacquiring the GIL.
"""
fnty = Type.function(Type.void(), [self.voidptr])
fn = self._get_function(fnty, name="PyEval_RestoreThread")
self.builder.call(fn, [thread_state])
#
# Generic object private data (a way of associating an arbitrary void *
# pointer to an arbitrary Python object).
#
def object_get_private_data(self, obj):
fnty = Type.function(self.voidptr, [self.pyobj])
fn = self._get_function(fnty, name="numba_get_pyobject_private_data")
return self.builder.call(fn, (obj,))
def object_set_private_data(self, obj, ptr):
fnty = Type.function(Type.void(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="numba_set_pyobject_private_data")
return self.builder.call(fn, (obj, ptr))
def object_reset_private_data(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="numba_reset_pyobject_private_data")
return self.builder.call(fn, (obj,))
#
# Other APIs (organize them better!)
#
def import_module_noblock(self, modname):
fnty = Type.function(self.pyobj, [self.cstring])
fn = self._get_function(fnty, name="PyImport_ImportModuleNoBlock")
return self.builder.call(fn, [modname])
def call_function_objargs(self, callee, objargs):
fnty = Type.function(self.pyobj, [self.pyobj], var_arg=True)
fn = self._get_function(fnty, name="PyObject_CallFunctionObjArgs")
args = [callee] + list(objargs)
args.append(self.context.get_constant_null(types.pyobject))
return self.builder.call(fn, args)
def call_method(self, callee, method, objargs=()):
cname = self.context.insert_const_string(self.module, method)
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring, self.cstring],
var_arg=True)
fn = self._get_function(fnty, name="PyObject_CallMethod")
fmt = 'O' * len(objargs)
cfmt = self.context.insert_const_string(self.module, fmt)
args = [callee, cname, cfmt]
if objargs:
args.extend(objargs)
args.append(self.context.get_constant_null(types.pyobject))
return self.builder.call(fn, args)
def call(self, callee, args=None, kws=None):
if args is None:
args = self.get_null_object()
if kws is None:
kws = self.get_null_object()
fnty = Type.function(self.pyobj, [self.pyobj] * 3)
fn = self._get_function(fnty, name="PyObject_Call")
return self.builder.call(fn, (callee, args, kws))
def object_istrue(self, obj):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PyObject_IsTrue")
return self.builder.call(fn, [obj])
def object_not(self, obj):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PyObject_Not")
return self.builder.call(fn, [obj])
def object_richcompare(self, lhs, rhs, opstr):
"""
Refer to the CPython source Include/object.h for the macro
definitions of the opid constants.
"""
ops = ['<', '<=', '==', '!=', '>', '>=']
if opstr in ops:
opid = ops.index(opstr)
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj, Type.int()])
fn = self._get_function(fnty, name="PyObject_RichCompare")
lopid = self.context.get_constant(types.int32, opid)
return self.builder.call(fn, (lhs, rhs, lopid))
elif opstr == 'is':
bitflag = self.builder.icmp(lc.ICMP_EQ, lhs, rhs)
return self.bool_from_bool(bitflag)
elif opstr == 'is not':
bitflag = self.builder.icmp(lc.ICMP_NE, lhs, rhs)
return self.bool_from_bool(bitflag)
elif opstr in ('in', 'not in'):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PySequence_Contains")
status = self.builder.call(fn, (rhs, lhs))
negone = self.context.get_constant(types.int32, -1)
is_good = self.builder.icmp(lc.ICMP_NE, status, negone)
# Stack allocate output and initialize to Null
outptr = cgutils.alloca_once_value(self.builder,
Constant.null(self.pyobj))
# If PySequence_Contains returns non-error value
with cgutils.if_likely(self.builder, is_good):
if opstr == 'not in':
status = self.builder.not_(status)
# Store the status as a boolean object
truncated = self.builder.trunc(status, Type.int(1))
self.builder.store(self.bool_from_bool(truncated),
outptr)
return self.builder.load(outptr)
else:
raise NotImplementedError("Unknown operator {op!r}".format(
op=opstr))
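# For example (illustrative), object_richcompare(a, b, '<=') emits a
# call to PyObject_RichCompare(a, b, 1) (opid 1 == Py_LE), while 'in'
# emits PySequence_Contains(rhs, lhs) and wraps the status in a bool.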
def iter_next(self, iterobj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyIter_Next")
return self.builder.call(fn, [iterobj])
def object_getiter(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetIter")
return self.builder.call(fn, [obj])
def object_getattr_string(self, obj, attr):
cstr = self.context.insert_const_string(self.module, attr)
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyObject_GetAttrString")
return self.builder.call(fn, [obj, cstr])
def object_getattr(self, obj, attr):
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetAttr")
return self.builder.call(fn, [obj, attr])
def object_setattr_string(self, obj, attr, val):
cstr = self.context.insert_const_string(self.module, attr)
fnty = Type.function(Type.int(), [self.pyobj, self.cstring, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetAttrString")
return self.builder.call(fn, [obj, cstr, val])
def object_setattr(self, obj, attr, val):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetAttr")
return self.builder.call(fn, [obj, attr, val])
def object_delattr_string(self, obj, attr):
# PyObject_DelAttrString() is actually a C macro calling
# PyObject_SetAttrString() with value == NULL.
return self.object_setattr_string(obj, attr, self.get_null_object())
def object_delattr(self, obj, attr):
# PyObject_DelAttr() is actually a C macro calling
# PyObject_SetAttr() with value == NULL.
return self.object_setattr(obj, attr, self.get_null_object())
def object_getitem(self, obj, key):
"""
Return obj[key]
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetItem")
return self.builder.call(fn, (obj, key))
def object_setitem(self, obj, key, val):
"""
obj[key] = val
"""
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetItem")
return self.builder.call(fn, (obj, key, val))
def object_delitem(self, obj, key):
"""
del obj[key]
"""
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_DelItem")
return self.builder.call(fn, (obj, key))
def string_as_string(self, strobj):
fnty = Type.function(self.cstring, [self.pyobj])
fname = "PyUnicode_AsUTF8"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [strobj])
def string_as_string_and_size(self, strobj):
"""
Returns a tuple of ``(ok, buffer, length)``.
The ``ok`` is an i1 value that is true on success.
The ``buffer`` is an i8* pointing at the UTF-8 output buffer.
The ``length`` is an i32/i64 (py_ssize_t) holding the buffer length.
"""
p_length = cgutils.alloca_once(self.builder, self.py_ssize_t)
fnty = Type.function(self.cstring, [self.pyobj,
self.py_ssize_t.as_pointer()])
fname = "PyUnicode_AsUTF8AndSize"
fn = self._get_function(fnty, name=fname)
buffer = self.builder.call(fn, [strobj, p_length])
ok = self.builder.icmp_unsigned('!=',
ir.Constant(buffer.type, None),
buffer)
return (ok, buffer, self.builder.load(p_length))
def string_as_string_size_and_kind(self, strobj):
"""
Returns a tuple of ``(ok, buffer, length, kind, is_ascii, hash)``.
The ``ok`` is an i1 value that is true on success.
The ``buffer`` is an i8* pointing at the output buffer.
The ``length`` is an i32/i64 (py_ssize_t) holding the buffer length.
The ``kind`` is an i32 (int32) holding the Unicode kind constant.
The ``is_ascii`` is an i32 (int32), nonzero if the string is ASCII-only.
The ``hash`` is a py_hash_t holding the precomputed string hash.
"""
p_length = cgutils.alloca_once(self.builder, self.py_ssize_t)
p_kind = cgutils.alloca_once(self.builder, Type.int())
p_ascii = cgutils.alloca_once(self.builder, Type.int())
p_hash = cgutils.alloca_once(self.builder, self.py_hash_t)
fnty = Type.function(self.cstring, [self.pyobj,
self.py_ssize_t.as_pointer(),
Type.int().as_pointer(),
Type.int().as_pointer(),
self.py_hash_t.as_pointer()])
fname = "numba_extract_unicode"
fn = self._get_function(fnty, name=fname)
buffer = self.builder.call(
fn, [strobj, p_length, p_kind, p_ascii, p_hash])
ok = self.builder.icmp_unsigned('!=',
ir.Constant(buffer.type, None),
buffer)
return (ok, buffer, self.builder.load(p_length),
self.builder.load(p_kind), self.builder.load(p_ascii),
self.builder.load(p_hash))
def string_from_string_and_size(self, string, size):
fnty = Type.function(self.pyobj, [self.cstring, self.py_ssize_t])
fname = "PyString_FromStringAndSize"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string, size])
def string_from_string(self, string):
fnty = Type.function(self.pyobj, [self.cstring])
fname = "PyUnicode_FromString"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string])
def string_from_kind_and_data(self, kind, string, size):
fnty = Type.function(self.pyobj, [Type.int(), self.cstring, self.py_ssize_t])
fname = "PyUnicode_FromKindAndData"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [kind, string, size])
def bytes_from_string_and_size(self, string, size):
fnty = Type.function(self.pyobj, [self.cstring, self.py_ssize_t])
fname = "PyBytes_FromStringAndSize"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string, size])
def object_hash(self, obj):
fnty = Type.function(self.py_hash_t, [self.pyobj,])
fname = "PyObject_Hash"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [obj,])
def object_str(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyObject_Str")
return self.builder.call(fn, [obj])
def make_none(self):
obj = self.borrow_none()
self.incref(obj)
return obj
def borrow_none(self):
return self.get_c_object("_Py_NoneStruct")
def sys_write_stdout(self, fmt, *args):
fnty = Type.function(Type.void(), [self.cstring], var_arg=True)
fn = self._get_function(fnty, name="PySys_FormatStdout")
return self.builder.call(fn, (fmt,) + args)
def object_dump(self, obj):
"""
Dump a Python object on C stderr. For debugging purposes.
"""
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="_PyObject_Dump")
return self.builder.call(fn, (obj,))
#
# NRT (Numba runtime) APIs
#
def nrt_adapt_ndarray_to_python(self, aryty, ary, dtypeptr):
assert self.context.enable_nrt, "NRT required"
intty = ir.IntType(32)
fnty = Type.function(self.pyobj,
[self.voidptr, intty, intty, self.pyobj])
fn = self._get_function(fnty, name="NRT_adapt_ndarray_to_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
ndim = self.context.get_constant(types.int32, aryty.ndim)
writable = self.context.get_constant(types.int32, int(aryty.mutable))
aryptr = cgutils.alloca_once_value(self.builder, ary)
return self.builder.call(fn, [self.builder.bitcast(aryptr,
self.voidptr),
ndim, writable, dtypeptr])
def nrt_meminfo_new_from_pyobject(self, data, pyobj):
"""
Allocate a new MemInfo with a data payload borrowed from a Python
object.
"""
mod = self.builder.module
fnty = ir.FunctionType(
cgutils.voidptr_t,
[cgutils.voidptr_t, cgutils.voidptr_t],
)
fn = mod.get_or_insert_function(
fnty,
name="NRT_meminfo_new_from_pyobject",
)
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [data, pyobj])
def nrt_meminfo_as_pyobject(self, miptr):
mod = self.builder.module
fnty = ir.FunctionType(
self.pyobj,
[cgutils.voidptr_t]
)
fn = mod.get_or_insert_function(
fnty,
name='NRT_meminfo_as_pyobject',
)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [miptr])
def nrt_meminfo_from_pyobject(self, miobj):
mod = self.builder.module
fnty = ir.FunctionType(
cgutils.voidptr_t,
[self.pyobj]
)
fn = mod.get_or_insert_function(
fnty,
name='NRT_meminfo_from_pyobject',
)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [miobj])
def nrt_adapt_ndarray_from_python(self, ary, ptr):
assert self.context.enable_nrt
fnty = Type.function(Type.int(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="NRT_adapt_ndarray_from_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (ary, ptr))
def nrt_adapt_buffer_from_python(self, buf, ptr):
assert self.context.enable_nrt
fnty = Type.function(Type.void(), [Type.pointer(self.py_buffer_t),
self.voidptr])
fn = self._get_function(fnty, name="NRT_adapt_buffer_from_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (buf, ptr))
# ------ utils -----
def _get_function(self, fnty, name):
return self.module.get_or_insert_function(fnty, name=name)
def alloca_obj(self):
return self.builder.alloca(self.pyobj)
def alloca_buffer(self):
"""
Return a pointer to a stack-allocated, zero-initialized Py_buffer.
"""
# Treat the buffer as an opaque array of bytes
ptr = cgutils.alloca_once_value(self.builder,
lc.Constant.null(self.py_buffer_t))
return ptr
@contextlib.contextmanager
def if_object_ok(self, obj):
with cgutils.if_likely(self.builder,
cgutils.is_not_null(self.builder, obj)):
yield
def print_object(self, obj):
strobj = self.object_str(obj)
cstr = self.string_as_string(strobj)
fmt = self.context.insert_const_string(self.module, "%s")
self.sys_write_stdout(fmt, cstr)
self.decref(strobj)
def print_string(self, text):
fmt = self.context.insert_const_string(self.module, text)
self.sys_write_stdout(fmt)
def get_null_object(self):
return Constant.null(self.pyobj)
def return_none(self):
none = self.make_none()
self.builder.ret(none)
def list_pack(self, items):
n = len(items)
seq = self.list_new(self.context.get_constant(types.intp, n))
with self.if_object_ok(seq):
for i in range(n):
idx = self.context.get_constant(types.intp, i)
self.incref(items[i])
self.list_setitem(seq, idx, items[i])
return seq
def unserialize(self, structptr):
"""
Unserialize some data. *structptr* should be a pointer to
a {i8* data, i32 length, i8* hashbuf} structure.
"""
fnty = Type.function(self.pyobj,
(self.voidptr, ir.IntType(32), self.voidptr))
fn = self._get_function(fnty, name="numba_unpickle")
ptr = self.builder.extract_value(self.builder.load(structptr), 0)
n = self.builder.extract_value(self.builder.load(structptr), 1)
hashed = self.builder.extract_value(self.builder.load(structptr), 2)
return self.builder.call(fn, (ptr, n, hashed))
def serialize_uncached(self, obj):
"""
Same as serialize_object(), but doesn't create a global variable:
it simply returns a literal {i8* data, i32 length, i8* hashbuf}
structure constant.
"""
# First make the array constant
data = serialize.dumps(obj)
assert len(data) < 2**31
name = ".const.pickledata.%s" % (id(obj) if config.DIFF_IR == 0 else "DIFF_IR")
bdata = cgutils.make_bytearray(data)
# Make SHA1 hash on the pickled content
# NOTE: update buffer size in numba_unpickle() when changing the
# hash algorithm.
hashed = cgutils.make_bytearray(hashlib.sha1(data).digest())
arr = self.context.insert_unique_const(self.module, name, bdata)
hasharr = self.context.insert_unique_const(
self.module, f"{name}.sha1", hashed,
)
# Then populate the structure constant
struct = ir.Constant.literal_struct([
arr.bitcast(self.voidptr),
ir.Constant(ir.IntType(32), arr.type.pointee.count),
hasharr.bitcast(self.voidptr),
])
return struct
def serialize_object(self, obj):
"""
Serialize the given object in the bitcode, and return it
as a pointer to a {i8* data, i32 length, i8* hashbuf} structure constant
(suitable for passing to unserialize()).
"""
try:
gv = self.module.__serialized[obj]
except KeyError:
struct = self.serialize_uncached(obj)
name = ".const.picklebuf.%s" % (id(obj) if config.DIFF_IR == 0 else "DIFF_IR")
gv = self.context.insert_unique_const(self.module, name, struct)
# Make the id() (and hence the name) unique while populating the module.
self.module.__serialized[obj] = gv
return gv
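# Round-trip sketch (illustrative): the constant emitted by
# serialize_object() is recovered at run time by unserialize(), which
# calls numba_unpickle on the embedded {data, length, hashbuf}:
#
#   gv = pyapi.serialize_object(("any", "picklable", "payload"))
#   obj = pyapi.unserialize(gv)   # new reference, NULL on failure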
def c_api_error(self):
return cgutils.is_not_null(self.builder, self.err_occurred())
def to_native_value(self, typ, obj):
"""
Unbox the Python object as the given Numba type.
A NativeValue instance is returned.
"""
from numba.core.boxing import unbox_unsupported
impl = _unboxers.lookup(typ.__class__, unbox_unsupported)
c = _UnboxContext(self.context, self.builder, self)
return impl(typ, obj, c)
def from_native_return(self, typ, val, env_manager):
assert not isinstance(typ, types.Optional), "callconv should have " \
"prevented the return of " \
"optional value"
out = self.from_native_value(typ, val, env_manager)
return out
def from_native_value(self, typ, val, env_manager=None):
"""
Box the native value of the given Numba type. A Python object
pointer is returned (NULL if an error occurred).
This method steals any native (NRT) reference embedded in *val*.
"""
from numba.core.boxing import box_unsupported
impl = _boxers.lookup(typ.__class__, box_unsupported)
c = _BoxContext(self.context, self.builder, self, env_manager)
return impl(typ, val, c)
def reflect_native_value(self, typ, val, env_manager=None):
"""
Reflect the native value onto its Python original, if any.
An error bit (as an LLVM value) is returned.
"""
impl = _reflectors.lookup(typ.__class__)
if impl is None:
# Reflection isn't needed for most types
return cgutils.false_bit
is_error = cgutils.alloca_once_value(self.builder, cgutils.false_bit)
c = _ReflectContext(self.context, self.builder, self, env_manager,
is_error)
impl(typ, val, c)
return self.builder.load(c.is_error)
def to_native_generator(self, obj, typ):
"""
Extract the generator structure pointer from a generator *obj*
(a _dynfunc.Generator instance).
"""
gen_ptr_ty = Type.pointer(self.context.get_data_type(typ))
value = self.context.get_generator_state(self.builder, obj, gen_ptr_ty)
return NativeValue(value)
def from_native_generator(self, val, typ, env=None):
"""
Make a Numba generator (a _dynfunc.Generator instance) from a
generator structure pointer *val*.
*env* is an optional _dynfunc.Environment instance to be wrapped
in the generator.
"""
llty = self.context.get_data_type(typ)
assert not llty.is_pointer
gen_struct_size = self.context.get_abi_sizeof(llty)
gendesc = self.context.get_generator_desc(typ)
# This is the PyCFunctionWithKeywords generated by PyCallWrapper
genfnty = Type.function(self.pyobj, [self.pyobj, self.pyobj, self.pyobj])
genfn = self._get_function(genfnty, name=gendesc.llvm_cpython_wrapper_name)
# This is the raw finalizer generated by _lower_generator_finalize_func()
finalizerty = Type.function(Type.void(), [self.voidptr])
if typ.has_finalizer:
finalizer = self._get_function(finalizerty, name=gendesc.llvm_finalizer_name)
else:
finalizer = Constant.null(Type.pointer(finalizerty))
# PyObject *numba_make_generator(state_size, initial_state, nextfunc, finalizer, env)
fnty = Type.function(self.pyobj, [self.py_ssize_t,
self.voidptr,
Type.pointer(genfnty),
Type.pointer(finalizerty),
self.voidptr])
fn = self._get_function(fnty, name="numba_make_generator")
state_size = ir.Constant(self.py_ssize_t, gen_struct_size)
initial_state = self.builder.bitcast(val, self.voidptr)
if env is None:
env = self.get_null_object()
env = self.builder.bitcast(env, self.voidptr)
return self.builder.call(fn,
(state_size, initial_state, genfn, finalizer, env))
def numba_array_adaptor(self, ary, ptr):
assert not self.context.enable_nrt
fnty = Type.function(Type.int(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="numba_adapt_ndarray")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (ary, ptr))
def numba_buffer_adaptor(self, buf, ptr):
fnty = Type.function(Type.void(),
[ir.PointerType(self.py_buffer_t), self.voidptr])
fn = self._get_function(fnty, name="numba_adapt_buffer")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (buf, ptr))
def complex_adaptor(self, cobj, cmplx):
fnty = Type.function(Type.int(), [self.pyobj, cmplx.type])
fn = self._get_function(fnty, name="numba_complex_adaptor")
return self.builder.call(fn, [cobj, cmplx])
def extract_record_data(self, obj, pbuf):
fnty = Type.function(self.voidptr,
[self.pyobj, ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_extract_record_data")
return self.builder.call(fn, [obj, pbuf])
def get_buffer(self, obj, pbuf):
fnty = Type.function(Type.int(),
[self.pyobj, ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_get_buffer")
return self.builder.call(fn, [obj, pbuf])
def release_buffer(self, pbuf):
fnty = Type.function(Type.void(), [ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_release_buffer")
return self.builder.call(fn, [pbuf])
def extract_np_datetime(self, obj):
fnty = Type.function(Type.int(64), [self.pyobj])
fn = self._get_function(fnty, name="numba_extract_np_datetime")
return self.builder.call(fn, [obj])
def extract_np_timedelta(self, obj):
fnty = Type.function(Type.int(64), [self.pyobj])
fn = self._get_function(fnty, name="numba_extract_np_timedelta")
return self.builder.call(fn, [obj])
def create_np_datetime(self, val, unit_code):
unit_code = Constant.int(Type.int(), unit_code)
fnty = Type.function(self.pyobj, [Type.int(64), Type.int()])
fn = self._get_function(fnty, name="numba_create_np_datetime")
return self.builder.call(fn, [val, unit_code])
def create_np_timedelta(self, val, unit_code):
unit_code = Constant.int(Type.int(), unit_code)
fnty = Type.function(self.pyobj, [Type.int(64), Type.int()])
fn = self._get_function(fnty, name="numba_create_np_timedelta")
return self.builder.call(fn, [val, unit_code])
def recreate_record(self, pdata, size, dtype, env_manager):
fnty = Type.function(self.pyobj, [Type.pointer(Type.int(8)),
Type.int(), self.pyobj])
fn = self._get_function(fnty, name="numba_recreate_record")
dtypeaddr = env_manager.read_const(env_manager.add_const(dtype))
return self.builder.call(fn, [pdata, size, dtypeaddr])
def string_from_constant_string(self, string):
cstr = self.context.insert_const_string(self.module, string)
sz = self.context.get_constant(types.intp, len(string))
return self.string_from_string_and_size(cstr, sz)
def call_jit_code(self, func, sig, args):
"""Calls into Numba jitted code and propagate error using the Python
calling convention.
Parameters
----------
func : function
The Python function to be compiled. This function is compiled
in nopython-mode.
sig : numba.typing.Signature
The function signature for *func*.
args : Sequence[llvmlite.binding.Value]
LLVM values to use as arguments.
Returns
-------
(is_error, res) : 2-tuple of llvmlite.binding.Value.
is_error : true iff *func* raised an exception.
res : Returned value from *func* iff *is_error* is false.
If *is_error* is true, this method will adapt the nopython exception
into a Python exception. Caller should return NULL to Python to
indicate an error.
"""
# Compile *func*
builder = self.builder
cres = self.context.compile_subroutine(builder, func, sig)
got_retty = cres.signature.return_type
retty = sig.return_type
if got_retty != retty:
# This error indicates an error in *func* or the caller of this
# method.
raise errors.LoweringError(
f'mismatching signature {got_retty} != {retty}.\n'
)
# Call into *func*
status, res = self.context.call_internal_no_propagate(
builder, cres.fndesc, sig, args,
)
# Post-call handling for *func*
is_error_ptr = cgutils.alloca_once(builder, cgutils.bool_t, zfill=True)
res_type = self.context.get_value_type(sig.return_type)
res_ptr = cgutils.alloca_once(builder, res_type, zfill=True)
# Handle error and adapt the nopython exception into cpython exception
with builder.if_else(status.is_error) as (has_err, no_err):
with has_err:
builder.store(status.is_error, is_error_ptr)
# Set error state in the Python interpreter
self.context.call_conv.raise_error(builder, self, status)
with no_err:
# Handle returned value
res = imputils.fix_returning_optional(
self.context, builder, sig, status, res,
)
builder.store(res, res_ptr)
is_error = builder.load(is_error_ptr)
res = builder.load(res_ptr)
return is_error, res
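# Usage sketch (illustrative, names hypothetical): callers test the
# error bit and return NULL to propagate the Python exception:
#
#   is_error, res = pyapi.call_jit_code(py_func, sig, llvm_args)
#   with builder.if_then(is_error):
#       builder.ret(pyapi.get_null_object())  # exception already set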
class ObjModeUtils:
"""Internal utils for calling objmode dispatcher from within NPM code.
"""
def __init__(self, pyapi):
self.pyapi = pyapi
def load_dispatcher(self, fnty, argtypes):
builder = self.pyapi.builder
tyctx = self.pyapi.context
m = builder.module
# Add a global variable to cache the objmode dispatcher
gv = ir.GlobalVariable(
m, self.pyapi.pyobj,
name=m.get_unique_name("cached_objmode_dispatcher"),
)
gv.initializer = gv.type.pointee(None)
gv.linkage = 'internal'
cached = builder.load(gv)
with builder.if_then(cgutils.is_null(builder, cached)):
if serialize.is_serialiable(fnty.dispatcher):
cls = type(self)
compiler = self.pyapi.unserialize(
self.pyapi.serialize_object(cls._call_objmode_dispatcher)
)
serialized_dispatcher = self.pyapi.serialize_object(
(fnty.dispatcher, tuple(argtypes)),
)
compile_args = self.pyapi.unserialize(serialized_dispatcher)
callee = self.pyapi.call_function_objargs(
compiler, [compile_args],
)
# Clean up
self.pyapi.decref(compiler)
self.pyapi.decref(compile_args)
else:
entry_pt = fnty.dispatcher.compile(tuple(argtypes))
callee = tyctx.add_dynamic_addr(
builder, id(entry_pt), info="with_objectmode",
)
# Incref the dispatcher and cache it
self.pyapi.incref(callee)
builder.store(callee, gv)
callee = builder.load(gv)
return callee
@staticmethod
def _call_objmode_dispatcher(compile_args):
dispatcher, argtypes = compile_args
entrypt = dispatcher.compile(argtypes)
return entrypt
|
def read_const(self, index):
"""
Look up constant number *index* inside the environment body.
A borrowed reference is returned.
The returned LLVM value may have NULL value at runtime which indicates
an error at runtime.
"""
assert index < len(self.env.consts)
builder = self.pyapi.builder
consts = self.env_body.consts
ret = cgutils.alloca_once(builder, self.pyapi.pyobj, zfill=True)
with builder.if_else(cgutils.is_not_null(builder, consts)) as \
(br_not_null, br_null):
with br_not_null:
getitem = self.pyapi.list_getitem(consts, index)
builder.store(getitem, ret)
with br_null:
# This can happen when the Environment is accidentally released
# and has subsequently been garbage collected.
self.pyapi.err_set_string(
"PyExc_RuntimeError",
"`env.consts` is NULL in `read_const`",
)
return builder.load(ret)
| 130
| 155
|
from collections import namedtuple
import contextlib
import pickle
import hashlib
from llvmlite import ir
from llvmlite.llvmpy.core import Type, Constant
import llvmlite.llvmpy.core as lc
import ctypes
from numba import _helperlib
from numba.core import (
types, utils, config, lowering, cgutils, imputils, serialize,
)
PY_UNICODE_1BYTE_KIND = _helperlib.py_unicode_1byte_kind
PY_UNICODE_2BYTE_KIND = _helperlib.py_unicode_2byte_kind
PY_UNICODE_4BYTE_KIND = _helperlib.py_unicode_4byte_kind
PY_UNICODE_WCHAR_KIND = _helperlib.py_unicode_wchar_kind
class _Registry(object):
def __init__(self):
self.functions = {}
def register(self, typeclass):
assert issubclass(typeclass, types.Type)
def decorator(func):
if typeclass in self.functions:
raise KeyError("duplicate registration for %s" % (typeclass,))
self.functions[typeclass] = func
return func
return decorator
def lookup(self, typeclass, default=None):
assert issubclass(typeclass, types.Type)
for cls in typeclass.__mro__:
func = self.functions.get(cls)
if func is not None:
return func
return default
# Registries of boxing / unboxing implementations
_boxers = _Registry()
_unboxers = _Registry()
_reflectors = _Registry()
box = _boxers.register
unbox = _unboxers.register
reflect = _reflectors.register
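# Registration sketch (illustrative, not an actual registration from
# this module): boxing/unboxing implementations hook in through the
# decorators above, keyed on the Numba type class. A hypothetical
# unboxer could look like:
#
#   @unbox(types.Boolean)
#   def unbox_boolean(typ, obj, c):
#       istrue = c.pyapi.object_istrue(obj)
#       val = c.builder.icmp_signed('!=', istrue, istrue.type(0))
#       return NativeValue(val, is_error=c.pyapi.c_api_error())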
class _BoxContext(namedtuple("_BoxContext",
("context", "builder", "pyapi", "env_manager"))):
"""
The facilities required by boxing implementations.
"""
__slots__ = ()
def box(self, typ, val):
return self.pyapi.from_native_value(typ, val, self.env_manager)
class _UnboxContext(namedtuple("_UnboxContext",
("context", "builder", "pyapi"))):
"""
The facilities required by unboxing implementations.
"""
__slots__ = ()
def unbox(self, typ, obj):
return self.pyapi.to_native_value(typ, obj)
class _ReflectContext(namedtuple("_ReflectContext",
("context", "builder", "pyapi", "env_manager",
"is_error"))):
"""
The facilities required by reflection implementations.
"""
__slots__ = ()
# XXX the error bit is currently unused by consumers (e.g. PyCallWrapper)
def set_error(self):
self.builder.store(cgutils.true_bit, self.is_error)
def box(self, typ, val):
return self.pyapi.from_native_value(typ, val, self.env_manager)
def reflect(self, typ, val):
return self.pyapi.reflect_native_value(typ, val, self.env_manager)
class NativeValue(object):
"""
Encapsulate the result of converting a Python object to a native value,
recording whether the conversion was successful and how to cleanup.
"""
def __init__(self, value, is_error=None, cleanup=None):
self.value = value
self.is_error = is_error if is_error is not None else cgutils.false_bit
self.cleanup = cleanup
class EnvironmentManager(object):
def __init__(self, pyapi, env, env_body, env_ptr):
assert isinstance(env, lowering.Environment)
self.pyapi = pyapi
self.env = env
self.env_body = env_body
self.env_ptr = env_ptr
def add_const(self, const):
"""
Add a constant to the environment, return its index.
"""
# All constants are frozen inside the environment
if isinstance(const, str):
const = utils.intern(const)
for index, val in enumerate(self.env.consts):
if val is const:
break
else:
index = len(self.env.consts)
self.env.consts.append(const)
return index
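# Pairing sketch (illustrative): a constant interned at compile time
# with add_const() is read back by index at run time:
#
#   index = env_manager.add_const(some_const)   # compile-time intern
#   obj = env_manager.read_const(index)         # borrowed ref, run time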
def read_const(self, index):
"""
Look up constant number *index* inside the environment body.
A borrowed reference is returned.
The returned LLVM value may have NULL value at runtime which indicates
an error at runtime.
"""
assert index < len(self.env.consts)
builder = self.pyapi.builder
consts = self.env_body.consts
ret = cgutils.alloca_once(builder, self.pyapi.pyobj, zfill=True)
with builder.if_else(cgutils.is_not_null(builder, consts)) as \
(br_not_null, br_null):
with br_not_null:
getitem = self.pyapi.list_getitem(consts, index)
builder.store(getitem, ret)
with br_null:
# This can happen when the Environment is accidentally released
# and has subsequently been garbage collected.
self.pyapi.err_set_string(
"PyExc_RuntimeError",
"`env.consts` is NULL in `read_const`",
)
return builder.load(ret)
_IteratorLoop = namedtuple('_IteratorLoop', ('value', 'do_break'))
class PythonAPI(object):
"""
Code generation facilities to call into the CPython C API (and related
helpers).
"""
def __init__(self, context, builder):
"""
Note: May be called multiple times when lowering a function.
"""
from numba.core import boxing
self.context = context
self.builder = builder
self.module = builder.basic_block.function.module
# A unique mapping of serialized objects in this module
try:
self.module.__serialized
except AttributeError:
self.module.__serialized = {}
# Initialize types
self.pyobj = self.context.get_argument_type(types.pyobject)
self.pyobjptr = self.pyobj.as_pointer()
self.voidptr = Type.pointer(Type.int(8))
self.long = Type.int(ctypes.sizeof(ctypes.c_long) * 8)
self.ulong = self.long
self.longlong = Type.int(ctypes.sizeof(ctypes.c_ulonglong) * 8)
self.ulonglong = self.longlong
self.double = Type.double()
self.py_ssize_t = self.context.get_value_type(types.intp)
self.cstring = Type.pointer(Type.int(8))
self.gil_state = Type.int(_helperlib.py_gil_state_size * 8)
self.py_buffer_t = ir.ArrayType(ir.IntType(8), _helperlib.py_buffer_size)
self.py_hash_t = self.py_ssize_t
self.py_unicode_1byte_kind = _helperlib.py_unicode_1byte_kind
self.py_unicode_2byte_kind = _helperlib.py_unicode_2byte_kind
self.py_unicode_4byte_kind = _helperlib.py_unicode_4byte_kind
self.py_unicode_wchar_kind = _helperlib.py_unicode_wchar_kind
def get_env_manager(self, env, env_body, env_ptr):
return EnvironmentManager(self, env, env_body, env_ptr)
def emit_environment_sentry(self, envptr, return_pyobject=False,
debug_msg=''):
"""Emits LLVM code to ensure the `envptr` is not NULL
"""
is_null = cgutils.is_null(self.builder, envptr)
with cgutils.if_unlikely(self.builder, is_null):
if return_pyobject:
fnty = self.builder.function.type.pointee
assert fnty.return_type == self.pyobj
self.err_set_string(
"PyExc_RuntimeError", f"missing Environment: {debug_msg}",
)
self.builder.ret(self.get_null_object())
else:
self.context.call_conv.return_user_exc(
self.builder, RuntimeError,
(f"missing Environment: {debug_msg}",),
)
# ------ Python API -----
#
# Basic object API
#
def incref(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="Py_IncRef")
self.builder.call(fn, [obj])
def decref(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="Py_DecRef")
self.builder.call(fn, [obj])
def get_type(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="numba_py_type")
return self.builder.call(fn, [obj])
#
# Argument unpacking
#
def parse_tuple_and_keywords(self, args, kws, fmt, keywords, *objs):
charptr = Type.pointer(Type.int(8))
charptrary = Type.pointer(charptr)
argtypes = [self.pyobj, self.pyobj, charptr, charptrary]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_ParseTupleAndKeywords")
return self.builder.call(fn, [args, kws, fmt, keywords] + list(objs))
def parse_tuple(self, args, fmt, *objs):
charptr = Type.pointer(Type.int(8))
argtypes = [self.pyobj, charptr]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_ParseTuple")
return self.builder.call(fn, [args, fmt] + list(objs))
def unpack_tuple(self, args, name, n_min, n_max, *objs):
charptr = Type.pointer(Type.int(8))
argtypes = [self.pyobj, charptr, self.py_ssize_t, self.py_ssize_t]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_UnpackTuple")
n_min = Constant.int(self.py_ssize_t, n_min)
n_max = Constant.int(self.py_ssize_t, n_max)
if isinstance(name, str):
name = self.context.insert_const_string(self.builder.module, name)
return self.builder.call(fn, [args, name, n_min, n_max] + list(objs))
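# Usage sketch (illustrative): unpack between 1 and 2 positional
# arguments into freshly allocated stack slots:
#
#   slots = [pyapi.alloca_obj() for _ in range(2)]
#   status = pyapi.unpack_tuple(args, "myfunc", 1, 2, *slots)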
#
# Exception and errors
#
def err_occurred(self):
fnty = Type.function(self.pyobj, ())
fn = self._get_function(fnty, name="PyErr_Occurred")
return self.builder.call(fn, ())
def err_clear(self):
fnty = Type.function(Type.void(), ())
fn = self._get_function(fnty, name="PyErr_Clear")
return self.builder.call(fn, ())
def err_set_string(self, exctype, msg):
fnty = Type.function(Type.void(), [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyErr_SetString")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
if isinstance(msg, str):
msg = self.context.insert_const_string(self.module, msg)
return self.builder.call(fn, (exctype, msg))
def err_format(self, exctype, msg, *format_args):
fnty = Type.function(Type.void(), [self.pyobj, self.cstring], var_arg=True)
fn = self._get_function(fnty, name="PyErr_Format")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
if isinstance(msg, str):
msg = self.context.insert_const_string(self.module, msg)
return self.builder.call(fn, (exctype, msg) + tuple(format_args))
def raise_object(self, exc=None):
"""
Raise an arbitrary exception: a type, a value, a (type, args) pair,
or None to re-raise the current exception. A reference to the
argument is consumed.
"""
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="numba_do_raise")
if exc is None:
exc = self.make_none()
return self.builder.call(fn, (exc,))
def err_set_object(self, exctype, excval):
fnty = Type.function(Type.void(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyErr_SetObject")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
return self.builder.call(fn, (exctype, excval))
def err_set_none(self, exctype):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="PyErr_SetNone")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
return self.builder.call(fn, (exctype,))
def err_write_unraisable(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="PyErr_WriteUnraisable")
return self.builder.call(fn, (obj,))
def err_fetch(self, pty, pval, ptb):
fnty = Type.function(Type.void(), [self.pyobjptr] * 3)
fn = self._get_function(fnty, name="PyErr_Fetch")
return self.builder.call(fn, (pty, pval, ptb))
def err_restore(self, ty, val, tb):
fnty = Type.function(Type.void(), [self.pyobj] * 3)
fn = self._get_function(fnty, name="PyErr_Restore")
return self.builder.call(fn, (ty, val, tb))
@contextlib.contextmanager
def err_push(self, keep_new=False):
"""
Temporarily push the current error indicator while the code
block is executed. If *keep_new* is True and the code block
raises a new error, the new error is kept, otherwise the old
error indicator is restored at the end of the block.
"""
pty, pval, ptb = [cgutils.alloca_once(self.builder, self.pyobj)
for i in range(3)]
self.err_fetch(pty, pval, ptb)
yield
ty = self.builder.load(pty)
val = self.builder.load(pval)
tb = self.builder.load(ptb)
if keep_new:
new_error = cgutils.is_not_null(self.builder, self.err_occurred())
with self.builder.if_else(new_error, likely=False) as (if_error, if_ok):
with if_error:
# Code block raised an error, keep it
self.decref(ty)
self.decref(val)
self.decref(tb)
with if_ok:
# Restore previous error
self.err_restore(ty, val, tb)
else:
self.err_restore(ty, val, tb)
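# Usage sketch (illustrative): preserve a pending exception across a
# cleanup call that may itself set a new error:
#
#   with pyapi.err_push(keep_new=True):
#       pyapi.decref(tmp)   # a __del__ here may raise; new error wins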
def get_c_object(self, name):
"""
Get a Python object through its C-accessible *name*
(e.g. "PyExc_ValueError"). The underlying variable must be
a `PyObject *`, and the value of that pointer is returned.
"""
# A LLVM global variable is implicitly a pointer to the declared
# type, so fix up by using pyobj.pointee.
return self.context.get_c_value(self.builder, self.pyobj.pointee, name,
dllimport=True)
def raise_missing_global_error(self, name):
msg = "global name '%s' is not defined" % name
cstr = self.context.insert_const_string(self.module, msg)
self.err_set_string("PyExc_NameError", cstr)
def raise_missing_name_error(self, name):
msg = "name '%s' is not defined" % name
cstr = self.context.insert_const_string(self.module, msg)
self.err_set_string("PyExc_NameError", cstr)
def fatal_error(self, msg):
fnty = Type.function(Type.void(), [self.cstring])
fn = self._get_function(fnty, name="Py_FatalError")
fn.attributes.add("noreturn")
cstr = self.context.insert_const_string(self.module, msg)
self.builder.call(fn, (cstr,))
#
# Concrete dict API
#
def dict_getitem_string(self, dic, name):
"""Lookup name inside dict
Returns a borrowed reference
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyDict_GetItemString")
cstr = self.context.insert_const_string(self.module, name)
return self.builder.call(fn, [dic, cstr])
def dict_getitem(self, dic, name):
"""Lookup name inside dict
Returns a borrowed reference
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyDict_GetItem")
return self.builder.call(fn, [dic, name])
def dict_new(self, presize=0):
if presize == 0:
fnty = Type.function(self.pyobj, ())
fn = self._get_function(fnty, name="PyDict_New")
return self.builder.call(fn, ())
else:
fnty = Type.function(self.pyobj, [self.py_ssize_t])
fn = self._get_function(fnty, name="_PyDict_NewPresized")
return self.builder.call(fn,
[Constant.int(self.py_ssize_t, presize)])
def dict_setitem(self, dictobj, nameobj, valobj):
fnty = Type.function(Type.int(), (self.pyobj, self.pyobj,
self.pyobj))
fn = self._get_function(fnty, name="PyDict_SetItem")
return self.builder.call(fn, (dictobj, nameobj, valobj))
def dict_setitem_string(self, dictobj, name, valobj):
fnty = Type.function(Type.int(), (self.pyobj, self.cstring,
self.pyobj))
fn = self._get_function(fnty, name="PyDict_SetItemString")
cstr = self.context.insert_const_string(self.module, name)
return self.builder.call(fn, (dictobj, cstr, valobj))
def dict_pack(self, keyvalues):
"""
Args
-----
keyvalues: iterable of (str, llvm.Value of PyObject*)
"""
dictobj = self.dict_new()
with self.if_object_ok(dictobj):
for k, v in keyvalues:
self.dict_setitem_string(dictobj, k, v)
return dictobj
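# Usage sketch (illustrative): build a keyword dict and pass it to
# PyObject_Call via call():
#
#   kws = pyapi.dict_pack([("axis", axisobj), ("out", outobj)])
#   res = pyapi.call(callee, args=argtuple, kws=kws)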
#
# Concrete number APIs
#
def float_from_double(self, fval):
fnty = Type.function(self.pyobj, [self.double])
fn = self._get_function(fnty, name="PyFloat_FromDouble")
return self.builder.call(fn, [fval])
def number_as_ssize_t(self, numobj):
fnty = Type.function(self.py_ssize_t, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyNumber_AsSsize_t")
# We don't want any clipping, so pass OverflowError as the 2nd arg
exc_class = self.get_c_object("PyExc_OverflowError")
return self.builder.call(fn, [numobj, exc_class])
def number_long(self, numobj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Long")
return self.builder.call(fn, [numobj])
def long_as_ulonglong(self, numobj):
fnty = Type.function(self.ulonglong, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsUnsignedLongLong")
return self.builder.call(fn, [numobj])
def long_as_longlong(self, numobj):
fnty = Type.function(self.longlong, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsLongLong")
return self.builder.call(fn, [numobj])
def long_as_voidptr(self, numobj):
"""
Convert the given Python integer to a void*. This is recommended
over number_as_ssize_t as it isn't affected by signedness.
"""
fnty = Type.function(self.voidptr, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsVoidPtr")
return self.builder.call(fn, [numobj])
def _long_from_native_int(self, ival, func_name, native_int_type,
signed):
fnty = Type.function(self.pyobj, [native_int_type])
fn = self._get_function(fnty, name=func_name)
resptr = cgutils.alloca_once(self.builder, self.pyobj)
self.builder.store(self.builder.call(fn, [ival]), resptr)
return self.builder.load(resptr)
def long_from_long(self, ival):
func_name = "PyLong_FromLong"
fnty = Type.function(self.pyobj, [self.long])
fn = self._get_function(fnty, name=func_name)
return self.builder.call(fn, [ival])
def long_from_ulong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromUnsignedLong",
self.long, signed=False)
def long_from_ssize_t(self, ival):
return self._long_from_native_int(ival, "PyLong_FromSsize_t",
self.py_ssize_t, signed=True)
def long_from_longlong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromLongLong",
self.longlong, signed=True)
def long_from_ulonglong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromUnsignedLongLong",
self.ulonglong, signed=False)
def long_from_signed_int(self, ival):
"""
Return a Python integer from any native integer value.
"""
bits = ival.type.width
if bits <= self.long.width:
return self.long_from_long(self.builder.sext(ival, self.long))
elif bits <= self.longlong.width:
return self.long_from_longlong(self.builder.sext(ival, self.longlong))
else:
raise OverflowError("integer too big (%d bits)" % (bits))
def long_from_unsigned_int(self, ival):
"""
Same as long_from_signed_int, but for unsigned values.
"""
bits = ival.type.width
if bits <= self.ulong.width:
return self.long_from_ulong(self.builder.zext(ival, self.ulong))
elif bits <= self.ulonglong.width:
return self.long_from_ulonglong(self.builder.zext(ival, self.ulonglong))
else:
raise OverflowError("integer too big (%d bits)" % (bits))
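# For example (illustrative), boxing an i16 sign- or zero-extends it to
# C long and calls PyLong_FromLong / PyLong_FromUnsignedLong; values
# wider than long long raise OverflowError at compile time rather than
# emitting a silently truncating conversion.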
def _get_number_operator(self, name):
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyNumber_%s" % name)
return fn
def _call_number_operator(self, name, lhs, rhs, inplace=False):
if inplace:
name = "InPlace" + name
fn = self._get_number_operator(name)
return self.builder.call(fn, [lhs, rhs])
def number_add(self, lhs, rhs, inplace=False):
return self._call_number_operator("Add", lhs, rhs, inplace=inplace)
def number_subtract(self, lhs, rhs, inplace=False):
return self._call_number_operator("Subtract", lhs, rhs, inplace=inplace)
def number_multiply(self, lhs, rhs, inplace=False):
return self._call_number_operator("Multiply", lhs, rhs, inplace=inplace)
def number_truedivide(self, lhs, rhs, inplace=False):
return self._call_number_operator("TrueDivide", lhs, rhs, inplace=inplace)
def number_floordivide(self, lhs, rhs, inplace=False):
return self._call_number_operator("FloorDivide", lhs, rhs, inplace=inplace)
def number_remainder(self, lhs, rhs, inplace=False):
return self._call_number_operator("Remainder", lhs, rhs, inplace=inplace)
def number_matrix_multiply(self, lhs, rhs, inplace=False):
return self._call_number_operator("MatrixMultiply", lhs, rhs, inplace=inplace)
def number_lshift(self, lhs, rhs, inplace=False):
return self._call_number_operator("Lshift", lhs, rhs, inplace=inplace)
def number_rshift(self, lhs, rhs, inplace=False):
return self._call_number_operator("Rshift", lhs, rhs, inplace=inplace)
def number_and(self, lhs, rhs, inplace=False):
return self._call_number_operator("And", lhs, rhs, inplace=inplace)
def number_or(self, lhs, rhs, inplace=False):
return self._call_number_operator("Or", lhs, rhs, inplace=inplace)
def number_xor(self, lhs, rhs, inplace=False):
return self._call_number_operator("Xor", lhs, rhs, inplace=inplace)
def number_power(self, lhs, rhs, inplace=False):
fnty = Type.function(self.pyobj, [self.pyobj] * 3)
fname = "PyNumber_InPlacePower" if inplace else "PyNumber_Power"
fn = self._get_function(fnty, fname)
return self.builder.call(fn, [lhs, rhs, self.borrow_none()])
def number_negative(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Negative")
return self.builder.call(fn, (obj,))
def number_positive(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Positive")
return self.builder.call(fn, (obj,))
def number_float(self, val):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Float")
return self.builder.call(fn, [val])
def number_invert(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Invert")
return self.builder.call(fn, (obj,))
def float_as_double(self, fobj):
fnty = Type.function(self.double, [self.pyobj])
fn = self._get_function(fnty, name="PyFloat_AsDouble")
return self.builder.call(fn, [fobj])
def bool_from_bool(self, bval):
"""
Get a Python bool from an LLVM boolean.
"""
longval = self.builder.zext(bval, self.long)
return self.bool_from_long(longval)
def bool_from_long(self, ival):
fnty = Type.function(self.pyobj, [self.long])
fn = self._get_function(fnty, name="PyBool_FromLong")
return self.builder.call(fn, [ival])
def complex_from_doubles(self, realval, imagval):
fnty = Type.function(self.pyobj, [Type.double(), Type.double()])
fn = self._get_function(fnty, name="PyComplex_FromDoubles")
return self.builder.call(fn, [realval, imagval])
def complex_real_as_double(self, cobj):
fnty = Type.function(Type.double(), [self.pyobj])
fn = self._get_function(fnty, name="PyComplex_RealAsDouble")
return self.builder.call(fn, [cobj])
def complex_imag_as_double(self, cobj):
fnty = Type.function(Type.double(), [self.pyobj])
fn = self._get_function(fnty, name="PyComplex_ImagAsDouble")
return self.builder.call(fn, [cobj])
#
# Concrete slice API
#
def slice_as_ints(self, obj):
"""
Read the members of a slice object holding integers.
Returns an (ok, start, stop, step) tuple where ok is a boolean
success flag and the remaining members are pointer-sized ints.
"""
pstart = cgutils.alloca_once(self.builder, self.py_ssize_t)
pstop = cgutils.alloca_once(self.builder, self.py_ssize_t)
pstep = cgutils.alloca_once(self.builder, self.py_ssize_t)
fnty = Type.function(Type.int(),
[self.pyobj] + [self.py_ssize_t.as_pointer()] * 3)
fn = self._get_function(fnty, name="numba_unpack_slice")
res = self.builder.call(fn, (obj, pstart, pstop, pstep))
start = self.builder.load(pstart)
stop = self.builder.load(pstop)
step = self.builder.load(pstep)
return cgutils.is_null(self.builder, res), start, stop, step
#
# List and sequence APIs
#
def sequence_getslice(self, obj, start, stop):
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t,
self.py_ssize_t])
fn = self._get_function(fnty, name="PySequence_GetSlice")
return self.builder.call(fn, (obj, start, stop))
def sequence_tuple(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PySequence_Tuple")
return self.builder.call(fn, [obj])
def list_new(self, szval):
fnty = Type.function(self.pyobj, [self.py_ssize_t])
fn = self._get_function(fnty, name="PyList_New")
return self.builder.call(fn, [szval])
def list_size(self, lst):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PyList_Size")
return self.builder.call(fn, [lst])
def list_append(self, lst, val):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyList_Append")
return self.builder.call(fn, [lst, val])
def list_setitem(self, lst, idx, val):
"""
Warning: Steals reference to ``val``
"""
fnty = Type.function(Type.int(), [self.pyobj, self.py_ssize_t,
self.pyobj])
fn = self._get_function(fnty, name="PyList_SetItem")
return self.builder.call(fn, [lst, idx, val])
def list_getitem(self, lst, idx):
"""
Returns a borrowed reference.
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t])
fn = self._get_function(fnty, name="PyList_GetItem")
if isinstance(idx, int):
idx = self.context.get_constant(types.intp, idx)
return self.builder.call(fn, [lst, idx])
def list_setslice(self, lst, start, stop, obj):
if obj is None:
obj = self.get_null_object()
fnty = Type.function(Type.int(), [self.pyobj, self.py_ssize_t,
self.py_ssize_t, self.pyobj])
fn = self._get_function(fnty, name="PyList_SetSlice")
return self.builder.call(fn, (lst, start, stop, obj))
#
# Concrete tuple API
#
def tuple_getitem(self, tup, idx):
"""
Borrow reference
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t])
fn = self._get_function(fnty, name="PyTuple_GetItem")
idx = self.context.get_constant(types.intp, idx)
return self.builder.call(fn, [tup, idx])
def tuple_pack(self, items):
fnty = Type.function(self.pyobj, [self.py_ssize_t], var_arg=True)
fn = self._get_function(fnty, name="PyTuple_Pack")
n = self.context.get_constant(types.intp, len(items))
args = [n]
args.extend(items)
return self.builder.call(fn, args)
def tuple_size(self, tup):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PyTuple_Size")
return self.builder.call(fn, [tup])
def tuple_new(self, count):
fnty = Type.function(self.pyobj, [Type.int()])
fn = self._get_function(fnty, name='PyTuple_New')
return self.builder.call(fn, [self.context.get_constant(types.int32,
count)])
def tuple_setitem(self, tuple_val, index, item):
"""
Steals a reference to `item`.
"""
fnty = Type.function(Type.int(), [self.pyobj, Type.int(), self.pyobj])
setitem_fn = self._get_function(fnty, name='PyTuple_SetItem')
index = self.context.get_constant(types.int32, index)
self.builder.call(setitem_fn, [tuple_val, index, item])
#
# Concrete set API
#
def set_new(self, iterable=None):
if iterable is None:
iterable = self.get_null_object()
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PySet_New")
return self.builder.call(fn, [iterable])
def set_add(self, set, value):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PySet_Add")
return self.builder.call(fn, [set, value])
def set_clear(self, set):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PySet_Clear")
return self.builder.call(fn, [set])
def set_size(self, set):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PySet_Size")
return self.builder.call(fn, [set])
def set_update(self, set, iterable):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="_PySet_Update")
return self.builder.call(fn, [set, iterable])
def set_next_entry(self, set, posptr, keyptr, hashptr):
fnty = Type.function(Type.int(),
[self.pyobj, self.py_ssize_t.as_pointer(),
self.pyobj.as_pointer(), self.py_hash_t.as_pointer()])
fn = self._get_function(fnty, name="_PySet_NextEntry")
return self.builder.call(fn, (set, posptr, keyptr, hashptr))
@contextlib.contextmanager
def set_iterate(self, set):
builder = self.builder
hashptr = cgutils.alloca_once(builder, self.py_hash_t, name="hashptr")
keyptr = cgutils.alloca_once(builder, self.pyobj, name="keyptr")
posptr = cgutils.alloca_once_value(builder,
ir.Constant(self.py_ssize_t, 0),
name="posptr")
bb_body = builder.append_basic_block("bb_body")
bb_end = builder.append_basic_block("bb_end")
builder.branch(bb_body)
def do_break():
builder.branch(bb_end)
with builder.goto_block(bb_body):
r = self.set_next_entry(set, posptr, keyptr, hashptr)
finished = cgutils.is_null(builder, r)
with builder.if_then(finished, likely=False):
builder.branch(bb_end)
yield _IteratorLoop(builder.load(keyptr), do_break)
builder.branch(bb_body)
builder.position_at_end(bb_end)
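    # Illustrative usage sketch (not part of the original source): the context
    # manager yields an _IteratorLoop, so a typical caller emits per-entry IR
    # like::
    #
    #     with pyapi.set_iterate(setobj) as loop:
    #         item = loop.value   # borrowed PyObject* of the current entry
    #         ...                 # per-item code; loop.do_break() exits early
    #
    # ``pyapi`` and ``setobj`` are assumed names for a PythonAPI instance and
    # a PyObject* LLVM value.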
#
# GIL APIs
#
def gil_ensure(self):
"""
Ensure the GIL is acquired.
The returned value must be consumed by gil_release().
"""
gilptrty = Type.pointer(self.gil_state)
fnty = Type.function(Type.void(), [gilptrty])
fn = self._get_function(fnty, "numba_gil_ensure")
gilptr = cgutils.alloca_once(self.builder, self.gil_state)
self.builder.call(fn, [gilptr])
return gilptr
def gil_release(self, gil):
"""
        Release the GIL acquired by gil_ensure().
Must be paired with a gil_ensure().
"""
gilptrty = Type.pointer(self.gil_state)
fnty = Type.function(Type.void(), [gilptrty])
fn = self._get_function(fnty, "numba_gil_release")
return self.builder.call(fn, [gil])
def save_thread(self):
"""
Release the GIL and return the former thread state
(an opaque non-NULL pointer).
"""
fnty = Type.function(self.voidptr, [])
fn = self._get_function(fnty, name="PyEval_SaveThread")
return self.builder.call(fn, [])
def restore_thread(self, thread_state):
"""
Restore the given thread state by reacquiring the GIL.
"""
fnty = Type.function(Type.void(), [self.voidptr])
fn = self._get_function(fnty, name="PyEval_RestoreThread")
self.builder.call(fn, [thread_state])
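    # Illustrative usage sketch (not part of the original source): the two
    # pairs above must be matched, e.g.::
    #
    #     gil = pyapi.gil_ensure()      # acquire the GIL
    #     ...                           # code requiring the GIL
    #     pyapi.gil_release(gil)        # release it with the saved state
    #
    #     ts = pyapi.save_thread()      # release the GIL around blocking work
    #     ...                           # GIL-free region
    #     pyapi.restore_thread(ts)      # reacquire using the thread state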
#
# Generic object private data (a way of associating an arbitrary void *
# pointer to an arbitrary Python object).
#
def object_get_private_data(self, obj):
fnty = Type.function(self.voidptr, [self.pyobj])
fn = self._get_function(fnty, name="numba_get_pyobject_private_data")
return self.builder.call(fn, (obj,))
def object_set_private_data(self, obj, ptr):
fnty = Type.function(Type.void(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="numba_set_pyobject_private_data")
return self.builder.call(fn, (obj, ptr))
def object_reset_private_data(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="numba_reset_pyobject_private_data")
return self.builder.call(fn, (obj,))
#
# Other APIs (organize them better!)
#
def import_module_noblock(self, modname):
fnty = Type.function(self.pyobj, [self.cstring])
fn = self._get_function(fnty, name="PyImport_ImportModuleNoBlock")
return self.builder.call(fn, [modname])
def call_function_objargs(self, callee, objargs):
fnty = Type.function(self.pyobj, [self.pyobj], var_arg=True)
fn = self._get_function(fnty, name="PyObject_CallFunctionObjArgs")
args = [callee] + list(objargs)
args.append(self.context.get_constant_null(types.pyobject))
return self.builder.call(fn, args)
def call_method(self, callee, method, objargs=()):
cname = self.context.insert_const_string(self.module, method)
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring, self.cstring],
var_arg=True)
fn = self._get_function(fnty, name="PyObject_CallMethod")
fmt = 'O' * len(objargs)
cfmt = self.context.insert_const_string(self.module, fmt)
args = [callee, cname, cfmt]
if objargs:
args.extend(objargs)
args.append(self.context.get_constant_null(types.pyobject))
return self.builder.call(fn, args)
def call(self, callee, args=None, kws=None):
if args is None:
args = self.get_null_object()
if kws is None:
kws = self.get_null_object()
fnty = Type.function(self.pyobj, [self.pyobj] * 3)
fn = self._get_function(fnty, name="PyObject_Call")
return self.builder.call(fn, (callee, args, kws))
def object_istrue(self, obj):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PyObject_IsTrue")
return self.builder.call(fn, [obj])
def object_not(self, obj):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PyObject_Not")
return self.builder.call(fn, [obj])
def object_richcompare(self, lhs, rhs, opstr):
"""
        Refer to the Python source Include/object.h for the macro
        definitions of the opid values.
"""
ops = ['<', '<=', '==', '!=', '>', '>=']
if opstr in ops:
opid = ops.index(opstr)
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj, Type.int()])
fn = self._get_function(fnty, name="PyObject_RichCompare")
lopid = self.context.get_constant(types.int32, opid)
return self.builder.call(fn, (lhs, rhs, lopid))
elif opstr == 'is':
bitflag = self.builder.icmp(lc.ICMP_EQ, lhs, rhs)
return self.bool_from_bool(bitflag)
elif opstr == 'is not':
bitflag = self.builder.icmp(lc.ICMP_NE, lhs, rhs)
return self.bool_from_bool(bitflag)
elif opstr in ('in', 'not in'):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PySequence_Contains")
status = self.builder.call(fn, (rhs, lhs))
negone = self.context.get_constant(types.int32, -1)
is_good = self.builder.icmp(lc.ICMP_NE, status, negone)
# Stack allocate output and initialize to Null
outptr = cgutils.alloca_once_value(self.builder,
Constant.null(self.pyobj))
# If PySequence_Contains returns non-error value
with cgutils.if_likely(self.builder, is_good):
if opstr == 'not in':
status = self.builder.not_(status)
# Store the status as a boolean object
truncated = self.builder.trunc(status, Type.int(1))
self.builder.store(self.bool_from_bool(truncated),
outptr)
return self.builder.load(outptr)
else:
raise NotImplementedError("Unknown operator {op!r}".format(
op=opstr))
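    # Illustrative note (not part of the original source): for the six rich
    # comparison operators, the opid passed to PyObject_RichCompare follows
    # CPython's Py_LT..Py_GE macros, e.g.::
    #
    #     pyapi.object_richcompare(lhs, rhs, '<=')  # emits opid 1 (Py_LE)
    #     pyapi.object_richcompare(lhs, rhs, 'in')  # uses PySequence_Contains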
def iter_next(self, iterobj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyIter_Next")
return self.builder.call(fn, [iterobj])
def object_getiter(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetIter")
return self.builder.call(fn, [obj])
def object_getattr_string(self, obj, attr):
cstr = self.context.insert_const_string(self.module, attr)
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyObject_GetAttrString")
return self.builder.call(fn, [obj, cstr])
def object_getattr(self, obj, attr):
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetAttr")
return self.builder.call(fn, [obj, attr])
def object_setattr_string(self, obj, attr, val):
cstr = self.context.insert_const_string(self.module, attr)
fnty = Type.function(Type.int(), [self.pyobj, self.cstring, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetAttrString")
return self.builder.call(fn, [obj, cstr, val])
def object_setattr(self, obj, attr, val):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetAttr")
return self.builder.call(fn, [obj, attr, val])
def object_delattr_string(self, obj, attr):
# PyObject_DelAttrString() is actually a C macro calling
# PyObject_SetAttrString() with value == NULL.
return self.object_setattr_string(obj, attr, self.get_null_object())
def object_delattr(self, obj, attr):
# PyObject_DelAttr() is actually a C macro calling
# PyObject_SetAttr() with value == NULL.
return self.object_setattr(obj, attr, self.get_null_object())
def object_getitem(self, obj, key):
"""
Return obj[key]
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetItem")
return self.builder.call(fn, (obj, key))
def object_setitem(self, obj, key, val):
"""
obj[key] = val
"""
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetItem")
return self.builder.call(fn, (obj, key, val))
def object_delitem(self, obj, key):
"""
del obj[key]
"""
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_DelItem")
return self.builder.call(fn, (obj, key))
def string_as_string(self, strobj):
fnty = Type.function(self.cstring, [self.pyobj])
fname = "PyUnicode_AsUTF8"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [strobj])
def string_as_string_and_size(self, strobj):
"""
Returns a tuple of ``(ok, buffer, length)``.
        The ``ok`` is an i1 value that is set on success.
        The ``buffer`` is an i8* pointing to the output buffer.
        The ``length`` is an i32/i64 (py_ssize_t) giving the length of the buffer.
"""
p_length = cgutils.alloca_once(self.builder, self.py_ssize_t)
fnty = Type.function(self.cstring, [self.pyobj,
self.py_ssize_t.as_pointer()])
fname = "PyUnicode_AsUTF8AndSize"
fn = self._get_function(fnty, name=fname)
buffer = self.builder.call(fn, [strobj, p_length])
ok = self.builder.icmp_unsigned('!=',
ir.Constant(buffer.type, None),
buffer)
return (ok, buffer, self.builder.load(p_length))
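    # Illustrative usage sketch (not part of the original source)::
    #
    #     ok, buf, length = pyapi.string_as_string_and_size(strobj)
    #     with cgutils.if_unlikely(pyapi.builder, pyapi.builder.not_(ok)):
    #         ...  # PyUnicode_AsUTF8AndSize failed; propagate the error
    #
    # ``pyapi`` and ``strobj`` are assumed names for a PythonAPI instance and
    # a PyObject* LLVM value.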
def string_as_string_size_and_kind(self, strobj):
"""
        Returns a tuple of ``(ok, buffer, length, kind, is_ascii, hash)``.
        The ``ok`` is an i1 value that is set on success.
        The ``buffer`` is an i8* pointing to the output buffer.
        The ``length`` is an i32/i64 (py_ssize_t) giving the length of the buffer.
        The ``kind`` is an i32 (int32) of the Unicode kind constant.
        The ``is_ascii`` is an i32 (int32) set if the string is ASCII-only.
        The ``hash`` is a long/uint64_t (py_hash_t) of the string's precomputed hash.
"""
p_length = cgutils.alloca_once(self.builder, self.py_ssize_t)
p_kind = cgutils.alloca_once(self.builder, Type.int())
p_ascii = cgutils.alloca_once(self.builder, Type.int())
p_hash = cgutils.alloca_once(self.builder, self.py_hash_t)
fnty = Type.function(self.cstring, [self.pyobj,
self.py_ssize_t.as_pointer(),
Type.int().as_pointer(),
Type.int().as_pointer(),
self.py_hash_t.as_pointer()])
fname = "numba_extract_unicode"
fn = self._get_function(fnty, name=fname)
buffer = self.builder.call(
fn, [strobj, p_length, p_kind, p_ascii, p_hash])
ok = self.builder.icmp_unsigned('!=',
ir.Constant(buffer.type, None),
buffer)
return (ok, buffer, self.builder.load(p_length),
self.builder.load(p_kind), self.builder.load(p_ascii),
self.builder.load(p_hash))
def string_from_string_and_size(self, string, size):
fnty = Type.function(self.pyobj, [self.cstring, self.py_ssize_t])
fname = "PyString_FromStringAndSize"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string, size])
def string_from_string(self, string):
fnty = Type.function(self.pyobj, [self.cstring])
fname = "PyUnicode_FromString"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string])
def string_from_kind_and_data(self, kind, string, size):
fnty = Type.function(self.pyobj, [Type.int(), self.cstring, self.py_ssize_t])
fname = "PyUnicode_FromKindAndData"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [kind, string, size])
def bytes_from_string_and_size(self, string, size):
fnty = Type.function(self.pyobj, [self.cstring, self.py_ssize_t])
fname = "PyBytes_FromStringAndSize"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string, size])
def object_hash(self, obj):
fnty = Type.function(self.py_hash_t, [self.pyobj,])
fname = "PyObject_Hash"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [obj,])
def object_str(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyObject_Str")
return self.builder.call(fn, [obj])
def make_none(self):
obj = self.borrow_none()
self.incref(obj)
return obj
def borrow_none(self):
return self.get_c_object("_Py_NoneStruct")
def sys_write_stdout(self, fmt, *args):
fnty = Type.function(Type.void(), [self.cstring], var_arg=True)
fn = self._get_function(fnty, name="PySys_FormatStdout")
return self.builder.call(fn, (fmt,) + args)
def object_dump(self, obj):
"""
        Dump a Python object to C stderr, for debugging purposes.
"""
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="_PyObject_Dump")
return self.builder.call(fn, (obj,))
#
# NRT (Numba runtime) APIs
#
def nrt_adapt_ndarray_to_python(self, aryty, ary, dtypeptr):
assert self.context.enable_nrt, "NRT required"
intty = ir.IntType(32)
fnty = Type.function(self.pyobj,
[self.voidptr, intty, intty, self.pyobj])
fn = self._get_function(fnty, name="NRT_adapt_ndarray_to_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
ndim = self.context.get_constant(types.int32, aryty.ndim)
writable = self.context.get_constant(types.int32, int(aryty.mutable))
aryptr = cgutils.alloca_once_value(self.builder, ary)
return self.builder.call(fn, [self.builder.bitcast(aryptr,
self.voidptr),
ndim, writable, dtypeptr])
def nrt_meminfo_new_from_pyobject(self, data, pyobj):
"""
Allocate a new MemInfo with data payload borrowed from a python
object.
"""
mod = self.builder.module
fnty = ir.FunctionType(
cgutils.voidptr_t,
[cgutils.voidptr_t, cgutils.voidptr_t],
)
fn = mod.get_or_insert_function(
fnty,
name="NRT_meminfo_new_from_pyobject",
)
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [data, pyobj])
def nrt_meminfo_as_pyobject(self, miptr):
mod = self.builder.module
fnty = ir.FunctionType(
self.pyobj,
[cgutils.voidptr_t]
)
fn = mod.get_or_insert_function(
fnty,
name='NRT_meminfo_as_pyobject',
)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [miptr])
def nrt_meminfo_from_pyobject(self, miobj):
mod = self.builder.module
fnty = ir.FunctionType(
cgutils.voidptr_t,
[self.pyobj]
)
fn = mod.get_or_insert_function(
fnty,
name='NRT_meminfo_from_pyobject',
)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [miobj])
def nrt_adapt_ndarray_from_python(self, ary, ptr):
assert self.context.enable_nrt
fnty = Type.function(Type.int(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="NRT_adapt_ndarray_from_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (ary, ptr))
def nrt_adapt_buffer_from_python(self, buf, ptr):
assert self.context.enable_nrt
fnty = Type.function(Type.void(), [Type.pointer(self.py_buffer_t),
self.voidptr])
fn = self._get_function(fnty, name="NRT_adapt_buffer_from_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (buf, ptr))
# ------ utils -----
def _get_function(self, fnty, name):
return self.module.get_or_insert_function(fnty, name=name)
def alloca_obj(self):
return self.builder.alloca(self.pyobj)
def alloca_buffer(self):
"""
Return a pointer to a stack-allocated, zero-initialized Py_buffer.
"""
# Treat the buffer as an opaque array of bytes
ptr = cgutils.alloca_once_value(self.builder,
lc.Constant.null(self.py_buffer_t))
return ptr
@contextlib.contextmanager
def if_object_ok(self, obj):
with cgutils.if_likely(self.builder,
cgutils.is_not_null(self.builder, obj)):
yield
def print_object(self, obj):
strobj = self.object_str(obj)
cstr = self.string_as_string(strobj)
fmt = self.context.insert_const_string(self.module, "%s")
self.sys_write_stdout(fmt, cstr)
self.decref(strobj)
def print_string(self, text):
fmt = self.context.insert_const_string(self.module, text)
self.sys_write_stdout(fmt)
def get_null_object(self):
return Constant.null(self.pyobj)
def return_none(self):
none = self.make_none()
self.builder.ret(none)
def list_pack(self, items):
n = len(items)
seq = self.list_new(self.context.get_constant(types.intp, n))
with self.if_object_ok(seq):
for i in range(n):
idx = self.context.get_constant(types.intp, i)
self.incref(items[i])
self.list_setitem(seq, idx, items[i])
return seq
def unserialize(self, structptr):
"""
        Unserialize some data. *structptr* should be a pointer to
        a {i8* data, i32 length, i8* hashbuf} structure.
"""
fnty = Type.function(self.pyobj,
(self.voidptr, ir.IntType(32), self.voidptr))
fn = self._get_function(fnty, name="numba_unpickle")
ptr = self.builder.extract_value(self.builder.load(structptr), 0)
n = self.builder.extract_value(self.builder.load(structptr), 1)
hashed = self.builder.extract_value(self.builder.load(structptr), 2)
return self.builder.call(fn, (ptr, n, hashed))
def serialize_uncached(self, obj):
"""
Same as serialize_object(), but don't create a global variable,
simply return a literal {i8* data, i32 length, i8* hashbuf} structure.
"""
# First make the array constant
data = serialize.dumps(obj)
assert len(data) < 2**31
name = ".const.pickledata.%s" % (id(obj) if config.DIFF_IR == 0 else "DIFF_IR")
bdata = cgutils.make_bytearray(data)
# Make SHA1 hash on the pickled content
# NOTE: update buffer size in numba_unpickle() when changing the
# hash algorithm.
hashed = cgutils.make_bytearray(hashlib.sha1(data).digest())
arr = self.context.insert_unique_const(self.module, name, bdata)
hasharr = self.context.insert_unique_const(
self.module, f"{name}.sha1", hashed,
)
# Then populate the structure constant
struct = ir.Constant.literal_struct([
arr.bitcast(self.voidptr),
ir.Constant(ir.IntType(32), arr.type.pointee.count),
hasharr.bitcast(self.voidptr),
])
return struct
def serialize_object(self, obj):
"""
        Serialize the given object in the bitcode, and return it
        as a pointer to a {i8* data, i32 length, i8* hashbuf} structure
        constant (suitable for passing to unserialize()).
"""
try:
gv = self.module.__serialized[obj]
except KeyError:
struct = self.serialize_uncached(obj)
name = ".const.picklebuf.%s" % (id(obj) if config.DIFF_IR == 0 else "DIFF_IR")
gv = self.context.insert_unique_const(self.module, name, struct)
# Make the id() (and hence the name) unique while populating the module.
self.module.__serialized[obj] = gv
return gv
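    # Illustrative round-trip sketch (not part of the original source): a
    # Python constant is typically embedded and later rebuilt at runtime as::
    #
    #     gv = pyapi.serialize_object(some_const)  # compile time: pickle into IR
    #     obj = pyapi.unserialize(gv)              # runtime: numba_unpickle call
    #
    # ``some_const`` is an assumed name for any picklable Python object.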
def c_api_error(self):
return cgutils.is_not_null(self.builder, self.err_occurred())
def to_native_value(self, typ, obj):
"""
Unbox the Python object as the given Numba type.
A NativeValue instance is returned.
"""
from numba.core.boxing import unbox_unsupported
impl = _unboxers.lookup(typ.__class__, unbox_unsupported)
c = _UnboxContext(self.context, self.builder, self)
return impl(typ, obj, c)
def from_native_return(self, typ, val, env_manager):
assert not isinstance(typ, types.Optional), "callconv should have " \
"prevented the return of " \
"optional value"
out = self.from_native_value(typ, val, env_manager)
return out
def from_native_value(self, typ, val, env_manager=None):
"""
Box the native value of the given Numba type. A Python object
pointer is returned (NULL if an error occurred).
This method steals any native (NRT) reference embedded in *val*.
"""
from numba.core.boxing import box_unsupported
impl = _boxers.lookup(typ.__class__, box_unsupported)
c = _BoxContext(self.context, self.builder, self, env_manager)
return impl(typ, val, c)
def reflect_native_value(self, typ, val, env_manager=None):
"""
Reflect the native value onto its Python original, if any.
An error bit (as an LLVM value) is returned.
"""
impl = _reflectors.lookup(typ.__class__)
if impl is None:
# Reflection isn't needed for most types
return cgutils.false_bit
is_error = cgutils.alloca_once_value(self.builder, cgutils.false_bit)
c = _ReflectContext(self.context, self.builder, self, env_manager,
is_error)
impl(typ, val, c)
return self.builder.load(c.is_error)
def to_native_generator(self, obj, typ):
"""
Extract the generator structure pointer from a generator *obj*
(a _dynfunc.Generator instance).
"""
gen_ptr_ty = Type.pointer(self.context.get_data_type(typ))
value = self.context.get_generator_state(self.builder, obj, gen_ptr_ty)
return NativeValue(value)
def from_native_generator(self, val, typ, env=None):
"""
Make a Numba generator (a _dynfunc.Generator instance) from a
generator structure pointer *val*.
*env* is an optional _dynfunc.Environment instance to be wrapped
in the generator.
"""
llty = self.context.get_data_type(typ)
assert not llty.is_pointer
gen_struct_size = self.context.get_abi_sizeof(llty)
gendesc = self.context.get_generator_desc(typ)
# This is the PyCFunctionWithKeywords generated by PyCallWrapper
genfnty = Type.function(self.pyobj, [self.pyobj, self.pyobj, self.pyobj])
genfn = self._get_function(genfnty, name=gendesc.llvm_cpython_wrapper_name)
# This is the raw finalizer generated by _lower_generator_finalize_func()
finalizerty = Type.function(Type.void(), [self.voidptr])
if typ.has_finalizer:
finalizer = self._get_function(finalizerty, name=gendesc.llvm_finalizer_name)
else:
finalizer = Constant.null(Type.pointer(finalizerty))
# PyObject *numba_make_generator(state_size, initial_state, nextfunc, finalizer, env)
fnty = Type.function(self.pyobj, [self.py_ssize_t,
self.voidptr,
Type.pointer(genfnty),
Type.pointer(finalizerty),
self.voidptr])
fn = self._get_function(fnty, name="numba_make_generator")
state_size = ir.Constant(self.py_ssize_t, gen_struct_size)
initial_state = self.builder.bitcast(val, self.voidptr)
if env is None:
env = self.get_null_object()
env = self.builder.bitcast(env, self.voidptr)
return self.builder.call(fn,
(state_size, initial_state, genfn, finalizer, env))
def numba_array_adaptor(self, ary, ptr):
assert not self.context.enable_nrt
fnty = Type.function(Type.int(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="numba_adapt_ndarray")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (ary, ptr))
def numba_buffer_adaptor(self, buf, ptr):
fnty = Type.function(Type.void(),
[ir.PointerType(self.py_buffer_t), self.voidptr])
fn = self._get_function(fnty, name="numba_adapt_buffer")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (buf, ptr))
def complex_adaptor(self, cobj, cmplx):
fnty = Type.function(Type.int(), [self.pyobj, cmplx.type])
fn = self._get_function(fnty, name="numba_complex_adaptor")
return self.builder.call(fn, [cobj, cmplx])
def extract_record_data(self, obj, pbuf):
fnty = Type.function(self.voidptr,
[self.pyobj, ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_extract_record_data")
return self.builder.call(fn, [obj, pbuf])
def get_buffer(self, obj, pbuf):
fnty = Type.function(Type.int(),
[self.pyobj, ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_get_buffer")
return self.builder.call(fn, [obj, pbuf])
def release_buffer(self, pbuf):
fnty = Type.function(Type.void(), [ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_release_buffer")
return self.builder.call(fn, [pbuf])
def extract_np_datetime(self, obj):
fnty = Type.function(Type.int(64), [self.pyobj])
fn = self._get_function(fnty, name="numba_extract_np_datetime")
return self.builder.call(fn, [obj])
def extract_np_timedelta(self, obj):
fnty = Type.function(Type.int(64), [self.pyobj])
fn = self._get_function(fnty, name="numba_extract_np_timedelta")
return self.builder.call(fn, [obj])
def create_np_datetime(self, val, unit_code):
unit_code = Constant.int(Type.int(), unit_code)
fnty = Type.function(self.pyobj, [Type.int(64), Type.int()])
fn = self._get_function(fnty, name="numba_create_np_datetime")
return self.builder.call(fn, [val, unit_code])
def create_np_timedelta(self, val, unit_code):
unit_code = Constant.int(Type.int(), unit_code)
fnty = Type.function(self.pyobj, [Type.int(64), Type.int()])
fn = self._get_function(fnty, name="numba_create_np_timedelta")
return self.builder.call(fn, [val, unit_code])
def recreate_record(self, pdata, size, dtype, env_manager):
fnty = Type.function(self.pyobj, [Type.pointer(Type.int(8)),
Type.int(), self.pyobj])
fn = self._get_function(fnty, name="numba_recreate_record")
dtypeaddr = env_manager.read_const(env_manager.add_const(dtype))
return self.builder.call(fn, [pdata, size, dtypeaddr])
def string_from_constant_string(self, string):
cstr = self.context.insert_const_string(self.module, string)
sz = self.context.get_constant(types.intp, len(string))
return self.string_from_string_and_size(cstr, sz)
def call_jit_code(self, func, sig, args):
"""Calls into Numba jitted code and propagate error using the Python
calling convention.
Parameters
----------
func : function
The Python function to be compiled. This function is compiled
in nopython-mode.
sig : numba.typing.Signature
The function signature for *func*.
args : Sequence[llvmlite.binding.Value]
LLVM values to use as arguments.
Returns
-------
(is_error, res) : 2-tuple of llvmlite.binding.Value.
is_error : true iff *func* raised an exception.
res : Returned value from *func* iff *is_error* is false.
If *is_error* is true, this method will adapt the nopython exception
into a Python exception. Caller should return NULL to Python to
indicate an error.
"""
# Compile *func*
builder = self.builder
cres = self.context.compile_subroutine(builder, func, sig)
got_retty = cres.signature.return_type
retty = sig.return_type
if got_retty != retty:
# This error indicates an error in *func* or the caller of this
# method.
raise errors.LoweringError(
f'mismatching signature {got_retty} != {retty}.\n'
)
# Call into *func*
status, res = self.context.call_internal_no_propagate(
builder, cres.fndesc, sig, args,
)
# Post-call handling for *func*
is_error_ptr = cgutils.alloca_once(builder, cgutils.bool_t, zfill=True)
res_type = self.context.get_value_type(sig.return_type)
res_ptr = cgutils.alloca_once(builder, res_type, zfill=True)
# Handle error and adapt the nopython exception into cpython exception
with builder.if_else(status.is_error) as (has_err, no_err):
with has_err:
builder.store(status.is_error, is_error_ptr)
# Set error state in the Python interpreter
self.context.call_conv.raise_error(builder, self, status)
with no_err:
# Handle returned value
res = imputils.fix_returning_optional(
self.context, builder, sig, status, res,
)
builder.store(res, res_ptr)
is_error = builder.load(is_error_ptr)
res = builder.load(res_ptr)
return is_error, res
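    # Illustrative usage sketch (not part of the original source)::
    #
    #     is_error, res = pyapi.call_jit_code(my_func, my_sig, llvm_args)
    #     with pyapi.builder.if_then(is_error):
    #         pyapi.builder.ret(pyapi.get_null_object())  # an error is set
    #
    # ``my_func``, ``my_sig`` and ``llvm_args`` are assumed names for a Python
    # function, its numba Signature, and a sequence of LLVM values.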
class ObjModeUtils:
"""Internal utils for calling objmode dispatcher from within NPM code.
"""
def __init__(self, pyapi):
self.pyapi = pyapi
def load_dispatcher(self, fnty, argtypes):
builder = self.pyapi.builder
tyctx = self.pyapi.context
m = builder.module
# Add a global variable to cache the objmode dispatcher
gv = ir.GlobalVariable(
m, self.pyapi.pyobj,
name=m.get_unique_name("cached_objmode_dispatcher"),
)
gv.initializer = gv.type.pointee(None)
gv.linkage = 'internal'
cached = builder.load(gv)
with builder.if_then(cgutils.is_null(builder, cached)):
if serialize.is_serialiable(fnty.dispatcher):
cls = type(self)
compiler = self.pyapi.unserialize(
self.pyapi.serialize_object(cls._call_objmode_dispatcher)
)
serialized_dispatcher = self.pyapi.serialize_object(
(fnty.dispatcher, tuple(argtypes)),
)
compile_args = self.pyapi.unserialize(serialized_dispatcher)
callee = self.pyapi.call_function_objargs(
compiler, [compile_args],
)
# Clean up
self.pyapi.decref(compiler)
self.pyapi.decref(compile_args)
else:
entry_pt = fnty.dispatcher.compile(tuple(argtypes))
callee = tyctx.add_dynamic_addr(
builder, id(entry_pt), info="with_objectmode",
)
# Incref the dispatcher and cache it
self.pyapi.incref(callee)
builder.store(callee, gv)
callee = builder.load(gv)
return callee
@staticmethod
def _call_objmode_dispatcher(compile_args):
dispatcher, argtypes = compile_args
entrypt = dispatcher.compile(argtypes)
return entrypt
|
string_as_string_and_size
|
Returns a tuple of ``(ok, buffer, length)``.
The ``ok`` is an i1 value that is set on success.
The ``buffer`` is an i8* pointing to the output buffer.
The ``length`` is an i32/i64 (py_ssize_t) giving the length of the buffer.
|
from collections import namedtuple
import contextlib
import pickle
import hashlib
from llvmlite import ir
from llvmlite.llvmpy.core import Type, Constant
import llvmlite.llvmpy.core as lc
import ctypes
from numba import _helperlib
from numba.core import (
types, utils, config, lowering, cgutils, imputils, serialize,
)
PY_UNICODE_1BYTE_KIND = _helperlib.py_unicode_1byte_kind
PY_UNICODE_2BYTE_KIND = _helperlib.py_unicode_2byte_kind
PY_UNICODE_4BYTE_KIND = _helperlib.py_unicode_4byte_kind
PY_UNICODE_WCHAR_KIND = _helperlib.py_unicode_wchar_kind
class _Registry(object):
def __init__(self):
self.functions = {}
def register(self, typeclass):
assert issubclass(typeclass, types.Type)
def decorator(func):
if typeclass in self.functions:
raise KeyError("duplicate registration for %s" % (typeclass,))
self.functions[typeclass] = func
return func
return decorator
def lookup(self, typeclass, default=None):
assert issubclass(typeclass, types.Type)
for cls in typeclass.__mro__:
func = self.functions.get(cls)
if func is not None:
return func
return default
# Registries of boxing / unboxing implementations
_boxers = _Registry()
_unboxers = _Registry()
_reflectors = _Registry()
box = _boxers.register
unbox = _unboxers.register
reflect = _reflectors.register
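# Illustrative registration sketch (not part of the original source): boxing
# and unboxing implementations hook into the registries above via decorators,
# e.g.::
#
#     @box(types.Boolean)
#     def box_bool(typ, val, c):
#         return c.pyapi.bool_from_bool(val)
#
# Lookup then walks the type's MRO, so subclasses inherit registrations.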
class _BoxContext(namedtuple("_BoxContext",
("context", "builder", "pyapi", "env_manager"))):
"""
The facilities required by boxing implementations.
"""
__slots__ = ()
def box(self, typ, val):
return self.pyapi.from_native_value(typ, val, self.env_manager)
class _UnboxContext(namedtuple("_UnboxContext",
("context", "builder", "pyapi"))):
"""
The facilities required by unboxing implementations.
"""
__slots__ = ()
def unbox(self, typ, obj):
return self.pyapi.to_native_value(typ, obj)
class _ReflectContext(namedtuple("_ReflectContext",
("context", "builder", "pyapi", "env_manager",
"is_error"))):
"""
The facilities required by reflection implementations.
"""
__slots__ = ()
# XXX the error bit is currently unused by consumers (e.g. PyCallWrapper)
def set_error(self):
        # Store the true bit into the is_error slot (value first, pointer second).
        self.builder.store(cgutils.true_bit, self.is_error)
def box(self, typ, val):
return self.pyapi.from_native_value(typ, val, self.env_manager)
def reflect(self, typ, val):
return self.pyapi.reflect_native_value(typ, val, self.env_manager)
class NativeValue(object):
"""
Encapsulate the result of converting a Python object to a native value,
recording whether the conversion was successful and how to cleanup.
"""
def __init__(self, value, is_error=None, cleanup=None):
self.value = value
self.is_error = is_error if is_error is not None else cgutils.false_bit
self.cleanup = cleanup
class EnvironmentManager(object):
def __init__(self, pyapi, env, env_body, env_ptr):
assert isinstance(env, lowering.Environment)
self.pyapi = pyapi
self.env = env
self.env_body = env_body
self.env_ptr = env_ptr
def add_const(self, const):
"""
Add a constant to the environment, return its index.
"""
# All constants are frozen inside the environment
if isinstance(const, str):
const = utils.intern(const)
for index, val in enumerate(self.env.consts):
if val is const:
break
else:
index = len(self.env.consts)
self.env.consts.append(const)
return index
def read_const(self, index):
"""
Look up constant number *index* inside the environment body.
A borrowed reference is returned.
        The returned LLVM value may be NULL at runtime, which indicates
        an error.
"""
assert index < len(self.env.consts)
builder = self.pyapi.builder
consts = self.env_body.consts
ret = cgutils.alloca_once(builder, self.pyapi.pyobj, zfill=True)
with builder.if_else(cgutils.is_not_null(builder, consts)) as \
(br_not_null, br_null):
with br_not_null:
getitem = self.pyapi.list_getitem(consts, index)
builder.store(getitem, ret)
with br_null:
# This can happen when the Environment is accidentally released
# and has subsequently been garbage collected.
self.pyapi.err_set_string(
"PyExc_RuntimeError",
"`env.consts` is NULL in `read_const`",
)
return builder.load(ret)
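    # Illustrative usage sketch (not part of the original source): constants
    # are interned at compile time and fetched at run time, e.g.::
    #
    #     index = env_manager.add_const(my_const)  # compile time
    #     obj = env_manager.read_const(index)      # runtime: borrowed ref
    #
    # ``my_const`` is an assumed name for any Python constant.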
_IteratorLoop = namedtuple('_IteratorLoop', ('value', 'do_break'))
class PythonAPI(object):
"""
Code generation facilities to call into the CPython C API (and related
helpers).
"""
def __init__(self, context, builder):
"""
        Note: May be called multiple times when lowering a function.
"""
from numba.core import boxing
self.context = context
self.builder = builder
self.module = builder.basic_block.function.module
# A unique mapping of serialized objects in this module
try:
self.module.__serialized
except AttributeError:
self.module.__serialized = {}
# Initialize types
self.pyobj = self.context.get_argument_type(types.pyobject)
self.pyobjptr = self.pyobj.as_pointer()
self.voidptr = Type.pointer(Type.int(8))
self.long = Type.int(ctypes.sizeof(ctypes.c_long) * 8)
self.ulong = self.long
self.longlong = Type.int(ctypes.sizeof(ctypes.c_ulonglong) * 8)
self.ulonglong = self.longlong
self.double = Type.double()
self.py_ssize_t = self.context.get_value_type(types.intp)
self.cstring = Type.pointer(Type.int(8))
self.gil_state = Type.int(_helperlib.py_gil_state_size * 8)
self.py_buffer_t = ir.ArrayType(ir.IntType(8), _helperlib.py_buffer_size)
self.py_hash_t = self.py_ssize_t
self.py_unicode_1byte_kind = _helperlib.py_unicode_1byte_kind
self.py_unicode_2byte_kind = _helperlib.py_unicode_2byte_kind
self.py_unicode_4byte_kind = _helperlib.py_unicode_4byte_kind
self.py_unicode_wchar_kind = _helperlib.py_unicode_wchar_kind
def get_env_manager(self, env, env_body, env_ptr):
return EnvironmentManager(self, env, env_body, env_ptr)
def emit_environment_sentry(self, envptr, return_pyobject=False,
debug_msg=''):
"""Emits LLVM code to ensure the `envptr` is not NULL
"""
is_null = cgutils.is_null(self.builder, envptr)
with cgutils.if_unlikely(self.builder, is_null):
if return_pyobject:
fnty = self.builder.function.type.pointee
assert fnty.return_type == self.pyobj
self.err_set_string(
"PyExc_RuntimeError", f"missing Environment: {debug_msg}",
)
self.builder.ret(self.get_null_object())
else:
self.context.call_conv.return_user_exc(
self.builder, RuntimeError,
(f"missing Environment: {debug_msg}",),
)
# ------ Python API -----
#
# Basic object API
#
def incref(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="Py_IncRef")
self.builder.call(fn, [obj])
def decref(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="Py_DecRef")
self.builder.call(fn, [obj])
def get_type(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="numba_py_type")
return self.builder.call(fn, [obj])
#
# Argument unpacking
#
def parse_tuple_and_keywords(self, args, kws, fmt, keywords, *objs):
charptr = Type.pointer(Type.int(8))
charptrary = Type.pointer(charptr)
argtypes = [self.pyobj, self.pyobj, charptr, charptrary]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_ParseTupleAndKeywords")
return self.builder.call(fn, [args, kws, fmt, keywords] + list(objs))
def parse_tuple(self, args, fmt, *objs):
charptr = Type.pointer(Type.int(8))
argtypes = [self.pyobj, charptr]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_ParseTuple")
return self.builder.call(fn, [args, fmt] + list(objs))
def unpack_tuple(self, args, name, n_min, n_max, *objs):
charptr = Type.pointer(Type.int(8))
argtypes = [self.pyobj, charptr, self.py_ssize_t, self.py_ssize_t]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_UnpackTuple")
n_min = Constant.int(self.py_ssize_t, n_min)
n_max = Constant.int(self.py_ssize_t, n_max)
if isinstance(name, str):
name = self.context.insert_const_string(self.builder.module, name)
return self.builder.call(fn, [args, name, n_min, n_max] + list(objs))
#
# Exception and errors
#
def err_occurred(self):
fnty = Type.function(self.pyobj, ())
fn = self._get_function(fnty, name="PyErr_Occurred")
return self.builder.call(fn, ())
def err_clear(self):
fnty = Type.function(Type.void(), ())
fn = self._get_function(fnty, name="PyErr_Clear")
return self.builder.call(fn, ())
def err_set_string(self, exctype, msg):
fnty = Type.function(Type.void(), [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyErr_SetString")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
if isinstance(msg, str):
msg = self.context.insert_const_string(self.module, msg)
return self.builder.call(fn, (exctype, msg))
def err_format(self, exctype, msg, *format_args):
fnty = Type.function(Type.void(), [self.pyobj, self.cstring], var_arg=True)
fn = self._get_function(fnty, name="PyErr_Format")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
if isinstance(msg, str):
msg = self.context.insert_const_string(self.module, msg)
return self.builder.call(fn, (exctype, msg) + tuple(format_args))
def raise_object(self, exc=None):
"""
Raise an arbitrary exception (type or value or (type, args)
or None - if reraising). A reference to the argument is consumed.
"""
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="numba_do_raise")
if exc is None:
exc = self.make_none()
return self.builder.call(fn, (exc,))
def err_set_object(self, exctype, excval):
fnty = Type.function(Type.void(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyErr_SetObject")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
return self.builder.call(fn, (exctype, excval))
def err_set_none(self, exctype):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="PyErr_SetNone")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
return self.builder.call(fn, (exctype,))
def err_write_unraisable(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="PyErr_WriteUnraisable")
return self.builder.call(fn, (obj,))
def err_fetch(self, pty, pval, ptb):
fnty = Type.function(Type.void(), [self.pyobjptr] * 3)
fn = self._get_function(fnty, name="PyErr_Fetch")
return self.builder.call(fn, (pty, pval, ptb))
def err_restore(self, ty, val, tb):
fnty = Type.function(Type.void(), [self.pyobj] * 3)
fn = self._get_function(fnty, name="PyErr_Restore")
return self.builder.call(fn, (ty, val, tb))
@contextlib.contextmanager
def err_push(self, keep_new=False):
"""
Temporarily push the current error indicator while the code
block is executed. If *keep_new* is True and the code block
raises a new error, the new error is kept, otherwise the old
error indicator is restored at the end of the block.
"""
pty, pval, ptb = [cgutils.alloca_once(self.builder, self.pyobj)
for i in range(3)]
self.err_fetch(pty, pval, ptb)
yield
ty = self.builder.load(pty)
val = self.builder.load(pval)
tb = self.builder.load(ptb)
if keep_new:
new_error = cgutils.is_not_null(self.builder, self.err_occurred())
with self.builder.if_else(new_error, likely=False) as (if_error, if_ok):
with if_error:
# Code block raised an error, keep it
self.decref(ty)
self.decref(val)
self.decref(tb)
with if_ok:
# Restore previous error
self.err_restore(ty, val, tb)
else:
self.err_restore(ty, val, tb)
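    # Illustrative usage sketch (not part of the original source)::
    #
    #     with pyapi.err_push(keep_new=True):
    #         ...  # emit cleanup IR that may itself raise
    #
    # The pending error indicator is saved before the block and restored
    # afterwards, unless the block raised a new error and keep_new is True.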
def get_c_object(self, name):
"""
Get a Python object through its C-accessible *name*
(e.g. "PyExc_ValueError"). The underlying variable must be
a `PyObject *`, and the value of that pointer is returned.
"""
# A LLVM global variable is implicitly a pointer to the declared
# type, so fix up by using pyobj.pointee.
return self.context.get_c_value(self.builder, self.pyobj.pointee, name,
dllimport=True)
def raise_missing_global_error(self, name):
msg = "global name '%s' is not defined" % name
cstr = self.context.insert_const_string(self.module, msg)
self.err_set_string("PyExc_NameError", cstr)
def raise_missing_name_error(self, name):
msg = "name '%s' is not defined" % name
cstr = self.context.insert_const_string(self.module, msg)
self.err_set_string("PyExc_NameError", cstr)
def fatal_error(self, msg):
fnty = Type.function(Type.void(), [self.cstring])
fn = self._get_function(fnty, name="Py_FatalError")
fn.attributes.add("noreturn")
cstr = self.context.insert_const_string(self.module, msg)
self.builder.call(fn, (cstr,))
#
# Concrete dict API
#
def dict_getitem_string(self, dic, name):
"""Lookup name inside dict
Returns a borrowed reference
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyDict_GetItemString")
cstr = self.context.insert_const_string(self.module, name)
return self.builder.call(fn, [dic, cstr])
def dict_getitem(self, dic, name):
"""Lookup name inside dict
Returns a borrowed reference
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyDict_GetItem")
return self.builder.call(fn, [dic, name])
def dict_new(self, presize=0):
if presize == 0:
fnty = Type.function(self.pyobj, ())
fn = self._get_function(fnty, name="PyDict_New")
return self.builder.call(fn, ())
else:
fnty = Type.function(self.pyobj, [self.py_ssize_t])
fn = self._get_function(fnty, name="_PyDict_NewPresized")
return self.builder.call(fn,
[Constant.int(self.py_ssize_t, presize)])
def dict_setitem(self, dictobj, nameobj, valobj):
fnty = Type.function(Type.int(), (self.pyobj, self.pyobj,
self.pyobj))
fn = self._get_function(fnty, name="PyDict_SetItem")
return self.builder.call(fn, (dictobj, nameobj, valobj))
def dict_setitem_string(self, dictobj, name, valobj):
fnty = Type.function(Type.int(), (self.pyobj, self.cstring,
self.pyobj))
fn = self._get_function(fnty, name="PyDict_SetItemString")
cstr = self.context.insert_const_string(self.module, name)
return self.builder.call(fn, (dictobj, cstr, valobj))
def dict_pack(self, keyvalues):
"""
Args
-----
keyvalues: iterable of (str, llvm.Value of PyObject*)
"""
dictobj = self.dict_new()
with self.if_object_ok(dictobj):
for k, v in keyvalues:
self.dict_setitem_string(dictobj, k, v)
return dictobj
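    # Illustrative usage sketch (not part of the original source)::
    #
    #     d = pyapi.dict_pack([("start", startobj), ("stop", stopobj)])
    #
    # ``startobj``/``stopobj`` are assumed names for PyObject* LLVM values;
    # the result is a new reference (or NULL on allocation failure).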
#
# Concrete number APIs
#
def float_from_double(self, fval):
fnty = Type.function(self.pyobj, [self.double])
fn = self._get_function(fnty, name="PyFloat_FromDouble")
return self.builder.call(fn, [fval])
def number_as_ssize_t(self, numobj):
fnty = Type.function(self.py_ssize_t, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyNumber_AsSsize_t")
# We don't want any clipping, so pass OverflowError as the 2nd arg
exc_class = self.get_c_object("PyExc_OverflowError")
return self.builder.call(fn, [numobj, exc_class])
def number_long(self, numobj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Long")
return self.builder.call(fn, [numobj])
def long_as_ulonglong(self, numobj):
fnty = Type.function(self.ulonglong, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsUnsignedLongLong")
return self.builder.call(fn, [numobj])
    def long_as_longlong(self, numobj):
        # self.longlong and self.ulonglong are the same LLVM type; use the
        # signed alias to match PyLong_AsLongLong.
        fnty = Type.function(self.longlong, [self.pyobj])
        fn = self._get_function(fnty, name="PyLong_AsLongLong")
        return self.builder.call(fn, [numobj])
def long_as_voidptr(self, numobj):
"""
Convert the given Python integer to a void*. This is recommended
over number_as_ssize_t as it isn't affected by signedness.
"""
fnty = Type.function(self.voidptr, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsVoidPtr")
return self.builder.call(fn, [numobj])
    def _long_from_native_int(self, ival, func_name, native_int_type,
                              signed):
        fnty = Type.function(self.pyobj, [native_int_type])
        fn = self._get_function(fnty, name=func_name)
        resptr = cgutils.alloca_once(self.builder, self.pyobj)
        self.builder.store(self.builder.call(fn, [ival]), resptr)
        return self.builder.load(resptr)
def long_from_long(self, ival):
func_name = "PyLong_FromLong"
fnty = Type.function(self.pyobj, [self.long])
fn = self._get_function(fnty, name=func_name)
return self.builder.call(fn, [ival])
def long_from_ulong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromUnsignedLong",
self.long, signed=False)
def long_from_ssize_t(self, ival):
return self._long_from_native_int(ival, "PyLong_FromSsize_t",
self.py_ssize_t, signed=True)
def long_from_longlong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromLongLong",
self.longlong, signed=True)
def long_from_ulonglong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromUnsignedLongLong",
self.ulonglong, signed=False)
def long_from_signed_int(self, ival):
"""
Return a Python integer from any native integer value.
"""
bits = ival.type.width
if bits <= self.long.width:
return self.long_from_long(self.builder.sext(ival, self.long))
elif bits <= self.longlong.width:
return self.long_from_longlong(self.builder.sext(ival, self.longlong))
else:
raise OverflowError("integer too big (%d bits)" % (bits))
def long_from_unsigned_int(self, ival):
"""
Same as long_from_signed_int, but for unsigned values.
"""
bits = ival.type.width
if bits <= self.ulong.width:
return self.long_from_ulong(self.builder.zext(ival, self.ulong))
elif bits <= self.ulonglong.width:
return self.long_from_ulonglong(self.builder.zext(ival, self.ulonglong))
else:
raise OverflowError("integer too big (%d bits)" % (bits))
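    # Illustrative note (not part of the original source): the two helpers
    # above pick the narrowest CPython constructor that fits; e.g. on a
    # platform with a 32-bit C long (such as Windows)::
    #
    #     pyapi.long_from_signed_int(i32_val)    # -> PyLong_FromLong
    #     pyapi.long_from_unsigned_int(u64_val)  # -> PyLong_FromUnsignedLongLong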
def _get_number_operator(self, name):
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyNumber_%s" % name)
return fn
def _call_number_operator(self, name, lhs, rhs, inplace=False):
if inplace:
name = "InPlace" + name
fn = self._get_number_operator(name)
return self.builder.call(fn, [lhs, rhs])
def number_add(self, lhs, rhs, inplace=False):
return self._call_number_operator("Add", lhs, rhs, inplace=inplace)
def number_subtract(self, lhs, rhs, inplace=False):
return self._call_number_operator("Subtract", lhs, rhs, inplace=inplace)
def number_multiply(self, lhs, rhs, inplace=False):
return self._call_number_operator("Multiply", lhs, rhs, inplace=inplace)
def number_truedivide(self, lhs, rhs, inplace=False):
return self._call_number_operator("TrueDivide", lhs, rhs, inplace=inplace)
def number_floordivide(self, lhs, rhs, inplace=False):
return self._call_number_operator("FloorDivide", lhs, rhs, inplace=inplace)
def number_remainder(self, lhs, rhs, inplace=False):
return self._call_number_operator("Remainder", lhs, rhs, inplace=inplace)
def number_matrix_multiply(self, lhs, rhs, inplace=False):
return self._call_number_operator("MatrixMultiply", lhs, rhs, inplace=inplace)
def number_lshift(self, lhs, rhs, inplace=False):
return self._call_number_operator("Lshift", lhs, rhs, inplace=inplace)
def number_rshift(self, lhs, rhs, inplace=False):
return self._call_number_operator("Rshift", lhs, rhs, inplace=inplace)
def number_and(self, lhs, rhs, inplace=False):
return self._call_number_operator("And", lhs, rhs, inplace=inplace)
def number_or(self, lhs, rhs, inplace=False):
return self._call_number_operator("Or", lhs, rhs, inplace=inplace)
def number_xor(self, lhs, rhs, inplace=False):
return self._call_number_operator("Xor", lhs, rhs, inplace=inplace)
def number_power(self, lhs, rhs, inplace=False):
fnty = Type.function(self.pyobj, [self.pyobj] * 3)
fname = "PyNumber_InPlacePower" if inplace else "PyNumber_Power"
fn = self._get_function(fnty, fname)
return self.builder.call(fn, [lhs, rhs, self.borrow_none()])
def number_negative(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Negative")
return self.builder.call(fn, (obj,))
def number_positive(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Positive")
return self.builder.call(fn, (obj,))
def number_float(self, val):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Float")
return self.builder.call(fn, [val])
def number_invert(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Invert")
return self.builder.call(fn, (obj,))
def float_as_double(self, fobj):
fnty = Type.function(self.double, [self.pyobj])
fn = self._get_function(fnty, name="PyFloat_AsDouble")
return self.builder.call(fn, [fobj])
def bool_from_bool(self, bval):
"""
        Get a Python bool from an LLVM boolean.
"""
longval = self.builder.zext(bval, self.long)
return self.bool_from_long(longval)
def bool_from_long(self, ival):
fnty = Type.function(self.pyobj, [self.long])
fn = self._get_function(fnty, name="PyBool_FromLong")
return self.builder.call(fn, [ival])
def complex_from_doubles(self, realval, imagval):
fnty = Type.function(self.pyobj, [Type.double(), Type.double()])
fn = self._get_function(fnty, name="PyComplex_FromDoubles")
return self.builder.call(fn, [realval, imagval])
def complex_real_as_double(self, cobj):
fnty = Type.function(Type.double(), [self.pyobj])
fn = self._get_function(fnty, name="PyComplex_RealAsDouble")
return self.builder.call(fn, [cobj])
def complex_imag_as_double(self, cobj):
fnty = Type.function(Type.double(), [self.pyobj])
fn = self._get_function(fnty, name="PyComplex_ImagAsDouble")
return self.builder.call(fn, [cobj])
#
# Concrete slice API
#
def slice_as_ints(self, obj):
"""
Read the members of a slice of integers.
Returns a (ok, start, stop, step) tuple where ok is a boolean and
the following members are pointer-sized ints.
"""
pstart = cgutils.alloca_once(self.builder, self.py_ssize_t)
pstop = cgutils.alloca_once(self.builder, self.py_ssize_t)
pstep = cgutils.alloca_once(self.builder, self.py_ssize_t)
fnty = Type.function(Type.int(),
[self.pyobj] + [self.py_ssize_t.as_pointer()] * 3)
fn = self._get_function(fnty, name="numba_unpack_slice")
res = self.builder.call(fn, (obj, pstart, pstop, pstep))
start = self.builder.load(pstart)
stop = self.builder.load(pstop)
step = self.builder.load(pstep)
return cgutils.is_null(self.builder, res), start, stop, step
#
# List and sequence APIs
#
def sequence_getslice(self, obj, start, stop):
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t,
self.py_ssize_t])
fn = self._get_function(fnty, name="PySequence_GetSlice")
return self.builder.call(fn, (obj, start, stop))
def sequence_tuple(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PySequence_Tuple")
return self.builder.call(fn, [obj])
def list_new(self, szval):
fnty = Type.function(self.pyobj, [self.py_ssize_t])
fn = self._get_function(fnty, name="PyList_New")
return self.builder.call(fn, [szval])
def list_size(self, lst):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PyList_Size")
return self.builder.call(fn, [lst])
def list_append(self, lst, val):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyList_Append")
return self.builder.call(fn, [lst, val])
def list_setitem(self, lst, idx, val):
"""
Warning: Steals reference to ``val``
"""
fnty = Type.function(Type.int(), [self.pyobj, self.py_ssize_t,
self.pyobj])
fn = self._get_function(fnty, name="PyList_SetItem")
return self.builder.call(fn, [lst, idx, val])
def list_getitem(self, lst, idx):
"""
Returns a borrowed reference.
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t])
fn = self._get_function(fnty, name="PyList_GetItem")
if isinstance(idx, int):
idx = self.context.get_constant(types.intp, idx)
return self.builder.call(fn, [lst, idx])
def list_setslice(self, lst, start, stop, obj):
if obj is None:
obj = self.get_null_object()
fnty = Type.function(Type.int(), [self.pyobj, self.py_ssize_t,
self.py_ssize_t, self.pyobj])
fn = self._get_function(fnty, name="PyList_SetSlice")
return self.builder.call(fn, (lst, start, stop, obj))
#
# Concrete tuple API
#
def tuple_getitem(self, tup, idx):
"""
Borrow reference
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t])
fn = self._get_function(fnty, name="PyTuple_GetItem")
idx = self.context.get_constant(types.intp, idx)
return self.builder.call(fn, [tup, idx])
def tuple_pack(self, items):
fnty = Type.function(self.pyobj, [self.py_ssize_t], var_arg=True)
fn = self._get_function(fnty, name="PyTuple_Pack")
n = self.context.get_constant(types.intp, len(items))
args = [n]
args.extend(items)
return self.builder.call(fn, args)
def tuple_size(self, tup):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PyTuple_Size")
return self.builder.call(fn, [tup])
def tuple_new(self, count):
fnty = Type.function(self.pyobj, [Type.int()])
fn = self._get_function(fnty, name='PyTuple_New')
return self.builder.call(fn, [self.context.get_constant(types.int32,
count)])
def tuple_setitem(self, tuple_val, index, item):
"""
Steals a reference to `item`.
"""
fnty = Type.function(Type.int(), [self.pyobj, Type.int(), self.pyobj])
setitem_fn = self._get_function(fnty, name='PyTuple_SetItem')
index = self.context.get_constant(types.int32, index)
self.builder.call(setitem_fn, [tuple_val, index, item])
#
# Concrete set API
#
def set_new(self, iterable=None):
if iterable is None:
iterable = self.get_null_object()
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PySet_New")
return self.builder.call(fn, [iterable])
def set_add(self, set, value):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PySet_Add")
return self.builder.call(fn, [set, value])
def set_clear(self, set):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PySet_Clear")
return self.builder.call(fn, [set])
def set_size(self, set):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PySet_Size")
return self.builder.call(fn, [set])
def set_update(self, set, iterable):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="_PySet_Update")
return self.builder.call(fn, [set, iterable])
def set_next_entry(self, set, posptr, keyptr, hashptr):
fnty = Type.function(Type.int(),
[self.pyobj, self.py_ssize_t.as_pointer(),
self.pyobj.as_pointer(), self.py_hash_t.as_pointer()])
fn = self._get_function(fnty, name="_PySet_NextEntry")
return self.builder.call(fn, (set, posptr, keyptr, hashptr))
@contextlib.contextmanager
def set_iterate(self, set):
builder = self.builder
hashptr = cgutils.alloca_once(builder, self.py_hash_t, name="hashptr")
keyptr = cgutils.alloca_once(builder, self.pyobj, name="keyptr")
posptr = cgutils.alloca_once_value(builder,
ir.Constant(self.py_ssize_t, 0),
name="posptr")
bb_body = builder.append_basic_block("bb_body")
bb_end = builder.append_basic_block("bb_end")
builder.branch(bb_body)
def do_break():
builder.branch(bb_end)
with builder.goto_block(bb_body):
r = self.set_next_entry(set, posptr, keyptr, hashptr)
finished = cgutils.is_null(builder, r)
with builder.if_then(finished, likely=False):
builder.branch(bb_end)
yield _IteratorLoop(builder.load(keyptr), do_break)
builder.branch(bb_body)
builder.position_at_end(bb_end)
#
# GIL APIs
#
def gil_ensure(self):
"""
Ensure the GIL is acquired.
The returned value must be consumed by gil_release().
"""
gilptrty = Type.pointer(self.gil_state)
fnty = Type.function(Type.void(), [gilptrty])
fn = self._get_function(fnty, "numba_gil_ensure")
gilptr = cgutils.alloca_once(self.builder, self.gil_state)
self.builder.call(fn, [gilptr])
return gilptr
def gil_release(self, gil):
"""
Release the acquired GIL by gil_ensure().
Must be paired with a gil_ensure().
"""
gilptrty = Type.pointer(self.gil_state)
fnty = Type.function(Type.void(), [gilptrty])
fn = self._get_function(fnty, "numba_gil_release")
return self.builder.call(fn, [gil])
def save_thread(self):
"""
Release the GIL and return the former thread state
(an opaque non-NULL pointer).
"""
fnty = Type.function(self.voidptr, [])
fn = self._get_function(fnty, name="PyEval_SaveThread")
return self.builder.call(fn, [])
def restore_thread(self, thread_state):
"""
Restore the given thread state by reacquiring the GIL.
"""
fnty = Type.function(Type.void(), [self.voidptr])
fn = self._get_function(fnty, name="PyEval_RestoreThread")
self.builder.call(fn, [thread_state])
#
# Generic object private data (a way of associating an arbitrary void *
# pointer to an arbitrary Python object).
#
def object_get_private_data(self, obj):
fnty = Type.function(self.voidptr, [self.pyobj])
fn = self._get_function(fnty, name="numba_get_pyobject_private_data")
return self.builder.call(fn, (obj,))
def object_set_private_data(self, obj, ptr):
fnty = Type.function(Type.void(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="numba_set_pyobject_private_data")
return self.builder.call(fn, (obj, ptr))
def object_reset_private_data(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="numba_reset_pyobject_private_data")
return self.builder.call(fn, (obj,))
#
# Other APIs (organize them better!)
#
def import_module_noblock(self, modname):
fnty = Type.function(self.pyobj, [self.cstring])
fn = self._get_function(fnty, name="PyImport_ImportModuleNoBlock")
return self.builder.call(fn, [modname])
def call_function_objargs(self, callee, objargs):
fnty = Type.function(self.pyobj, [self.pyobj], var_arg=True)
fn = self._get_function(fnty, name="PyObject_CallFunctionObjArgs")
args = [callee] + list(objargs)
args.append(self.context.get_constant_null(types.pyobject))
return self.builder.call(fn, args)
def call_method(self, callee, method, objargs=()):
cname = self.context.insert_const_string(self.module, method)
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring, self.cstring],
var_arg=True)
fn = self._get_function(fnty, name="PyObject_CallMethod")
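        # Build a format string with one 'O' (raw PyObject*) per argument.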
fmt = 'O' * len(objargs)
cfmt = self.context.insert_const_string(self.module, fmt)
args = [callee, cname, cfmt]
if objargs:
args.extend(objargs)
args.append(self.context.get_constant_null(types.pyobject))
return self.builder.call(fn, args)
def call(self, callee, args=None, kws=None):
if args is None:
args = self.get_null_object()
if kws is None:
kws = self.get_null_object()
fnty = Type.function(self.pyobj, [self.pyobj] * 3)
fn = self._get_function(fnty, name="PyObject_Call")
return self.builder.call(fn, (callee, args, kws))
def object_istrue(self, obj):
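        # Returns 1 or 0, or -1 on error (detect with c_api_error()).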
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PyObject_IsTrue")
return self.builder.call(fn, [obj])
def object_not(self, obj):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PyObject_Not")
return self.builder.call(fn, [obj])
def object_richcompare(self, lhs, rhs, opstr):
"""
Refer to Python source Include/object.h for macros definition
of the opid.
"""
ops = ['<', '<=', '==', '!=', '>', '>=']
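        # The index of each operator in `ops` matches CPython's
        # Py_LT..Py_GE opid constants (0..5) from Include/object.h.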
if opstr in ops:
opid = ops.index(opstr)
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj, Type.int()])
fn = self._get_function(fnty, name="PyObject_RichCompare")
lopid = self.context.get_constant(types.int32, opid)
return self.builder.call(fn, (lhs, rhs, lopid))
elif opstr == 'is':
bitflag = self.builder.icmp(lc.ICMP_EQ, lhs, rhs)
return self.bool_from_bool(bitflag)
elif opstr == 'is not':
bitflag = self.builder.icmp(lc.ICMP_NE, lhs, rhs)
return self.bool_from_bool(bitflag)
elif opstr in ('in', 'not in'):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PySequence_Contains")
status = self.builder.call(fn, (rhs, lhs))
negone = self.context.get_constant(types.int32, -1)
is_good = self.builder.icmp(lc.ICMP_NE, status, negone)
# Stack allocate output and initialize to Null
outptr = cgutils.alloca_once_value(self.builder,
Constant.null(self.pyobj))
# If PySequence_Contains returns non-error value
with cgutils.if_likely(self.builder, is_good):
if opstr == 'not in':
status = self.builder.not_(status)
# Store the status as a boolean object
truncated = self.builder.trunc(status, Type.int(1))
self.builder.store(self.bool_from_bool(truncated),
outptr)
return self.builder.load(outptr)
else:
raise NotImplementedError("Unknown operator {op!r}".format(
op=opstr))
def iter_next(self, iterobj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyIter_Next")
return self.builder.call(fn, [iterobj])
def object_getiter(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetIter")
return self.builder.call(fn, [obj])
def object_getattr_string(self, obj, attr):
cstr = self.context.insert_const_string(self.module, attr)
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyObject_GetAttrString")
return self.builder.call(fn, [obj, cstr])
def object_getattr(self, obj, attr):
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetAttr")
return self.builder.call(fn, [obj, attr])
def object_setattr_string(self, obj, attr, val):
cstr = self.context.insert_const_string(self.module, attr)
fnty = Type.function(Type.int(), [self.pyobj, self.cstring, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetAttrString")
return self.builder.call(fn, [obj, cstr, val])
def object_setattr(self, obj, attr, val):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetAttr")
return self.builder.call(fn, [obj, attr, val])
def object_delattr_string(self, obj, attr):
# PyObject_DelAttrString() is actually a C macro calling
# PyObject_SetAttrString() with value == NULL.
return self.object_setattr_string(obj, attr, self.get_null_object())
def object_delattr(self, obj, attr):
# PyObject_DelAttr() is actually a C macro calling
# PyObject_SetAttr() with value == NULL.
return self.object_setattr(obj, attr, self.get_null_object())
def object_getitem(self, obj, key):
"""
Return obj[key]
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetItem")
return self.builder.call(fn, (obj, key))
def object_setitem(self, obj, key, val):
"""
obj[key] = val
"""
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetItem")
return self.builder.call(fn, (obj, key, val))
def object_delitem(self, obj, key):
"""
del obj[key]
"""
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_DelItem")
return self.builder.call(fn, (obj, key))
def string_as_string(self, strobj):
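        # PyUnicode_AsUTF8 returns a UTF-8 buffer cached inside the unicode
        # object (valid as long as ``strobj`` is alive), or NULL on error.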
fnty = Type.function(self.cstring, [self.pyobj])
fname = "PyUnicode_AsUTF8"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [strobj])
    def string_as_string_and_size(self, strobj):
        """
        Returns a tuple of ``(ok, buffer, length)``.
        ``ok`` is an i1 value that is set on success.
        ``buffer`` is an i8* pointing to the output buffer.
        ``length`` is an i32/i64 (py_ssize_t) length of the buffer.
        """
        p_length = cgutils.alloca_once(self.builder, self.py_ssize_t)
        fnty = Type.function(self.cstring, [self.pyobj,
                                            self.py_ssize_t.as_pointer()])
        fname = "PyUnicode_AsUTF8AndSize"
        fn = self._get_function(fnty, name=fname)
        buffer = self.builder.call(fn, [strobj, p_length])
        ok = self.builder.icmp_unsigned('!=',
                                        ir.Constant(buffer.type, None),
                                        buffer)
        return (ok, buffer, self.builder.load(p_length))
def string_as_string_size_and_kind(self, strobj):
"""
Returns a tuple of ``(ok, buffer, length, kind)``.
The ``ok`` is i1 value that is set if ok.
The ``buffer`` is a i8* of the output buffer.
The ``length`` is a i32/i64 (py_ssize_t) of the length of the buffer.
The ``kind`` is a i32 (int32) of the Unicode kind constant
The ``hash`` is a long/uint64_t (py_hash_t) of the Unicode constant hash
"""
p_length = cgutils.alloca_once(self.builder, self.py_ssize_t)
p_kind = cgutils.alloca_once(self.builder, Type.int())
p_ascii = cgutils.alloca_once(self.builder, Type.int())
p_hash = cgutils.alloca_once(self.builder, self.py_hash_t)
fnty = Type.function(self.cstring, [self.pyobj,
self.py_ssize_t.as_pointer(),
Type.int().as_pointer(),
Type.int().as_pointer(),
self.py_hash_t.as_pointer()])
fname = "numba_extract_unicode"
fn = self._get_function(fnty, name=fname)
buffer = self.builder.call(
fn, [strobj, p_length, p_kind, p_ascii, p_hash])
ok = self.builder.icmp_unsigned('!=',
ir.Constant(buffer.type, None),
buffer)
return (ok, buffer, self.builder.load(p_length),
self.builder.load(p_kind), self.builder.load(p_ascii),
self.builder.load(p_hash))
def string_from_string_and_size(self, string, size):
fnty = Type.function(self.pyobj, [self.cstring, self.py_ssize_t])
fname = "PyString_FromStringAndSize"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string, size])
def string_from_string(self, string):
fnty = Type.function(self.pyobj, [self.cstring])
fname = "PyUnicode_FromString"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string])
def string_from_kind_and_data(self, kind, string, size):
fnty = Type.function(self.pyobj, [Type.int(), self.cstring, self.py_ssize_t])
fname = "PyUnicode_FromKindAndData"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [kind, string, size])
def bytes_from_string_and_size(self, string, size):
fnty = Type.function(self.pyobj, [self.cstring, self.py_ssize_t])
fname = "PyBytes_FromStringAndSize"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string, size])
def object_hash(self, obj):
fnty = Type.function(self.py_hash_t, [self.pyobj,])
fname = "PyObject_Hash"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [obj,])
def object_str(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyObject_Str")
return self.builder.call(fn, [obj])
def make_none(self):
obj = self.borrow_none()
self.incref(obj)
return obj
def borrow_none(self):
return self.get_c_object("_Py_NoneStruct")
def sys_write_stdout(self, fmt, *args):
fnty = Type.function(Type.void(), [self.cstring], var_arg=True)
fn = self._get_function(fnty, name="PySys_FormatStdout")
return self.builder.call(fn, (fmt,) + args)
def object_dump(self, obj):
"""
Dump a Python object on C stderr. For debugging purposes.
"""
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="_PyObject_Dump")
return self.builder.call(fn, (obj,))
#
# NRT (Numba runtime) APIs
#
def nrt_adapt_ndarray_to_python(self, aryty, ary, dtypeptr):
assert self.context.enable_nrt, "NRT required"
intty = ir.IntType(32)
fnty = Type.function(self.pyobj,
[self.voidptr, intty, intty, self.pyobj])
fn = self._get_function(fnty, name="NRT_adapt_ndarray_to_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
ndim = self.context.get_constant(types.int32, aryty.ndim)
writable = self.context.get_constant(types.int32, int(aryty.mutable))
aryptr = cgutils.alloca_once_value(self.builder, ary)
return self.builder.call(fn, [self.builder.bitcast(aryptr,
self.voidptr),
ndim, writable, dtypeptr])
def nrt_meminfo_new_from_pyobject(self, data, pyobj):
"""
Allocate a new MemInfo with data payload borrowed from a python
object.
"""
mod = self.builder.module
fnty = ir.FunctionType(
cgutils.voidptr_t,
[cgutils.voidptr_t, cgutils.voidptr_t],
)
fn = mod.get_or_insert_function(
fnty,
name="NRT_meminfo_new_from_pyobject",
)
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [data, pyobj])
def nrt_meminfo_as_pyobject(self, miptr):
mod = self.builder.module
fnty = ir.FunctionType(
self.pyobj,
[cgutils.voidptr_t]
)
fn = mod.get_or_insert_function(
fnty,
name='NRT_meminfo_as_pyobject',
)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [miptr])
def nrt_meminfo_from_pyobject(self, miobj):
mod = self.builder.module
fnty = ir.FunctionType(
cgutils.voidptr_t,
[self.pyobj]
)
fn = mod.get_or_insert_function(
fnty,
name='NRT_meminfo_from_pyobject',
)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [miobj])
def nrt_adapt_ndarray_from_python(self, ary, ptr):
assert self.context.enable_nrt
fnty = Type.function(Type.int(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="NRT_adapt_ndarray_from_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (ary, ptr))
def nrt_adapt_buffer_from_python(self, buf, ptr):
assert self.context.enable_nrt
fnty = Type.function(Type.void(), [Type.pointer(self.py_buffer_t),
self.voidptr])
fn = self._get_function(fnty, name="NRT_adapt_buffer_from_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (buf, ptr))
# ------ utils -----
def _get_function(self, fnty, name):
return self.module.get_or_insert_function(fnty, name=name)
def alloca_obj(self):
return self.builder.alloca(self.pyobj)
def alloca_buffer(self):
"""
Return a pointer to a stack-allocated, zero-initialized Py_buffer.
"""
# Treat the buffer as an opaque array of bytes
ptr = cgutils.alloca_once_value(self.builder,
lc.Constant.null(self.py_buffer_t))
return ptr
@contextlib.contextmanager
def if_object_ok(self, obj):
with cgutils.if_likely(self.builder,
cgutils.is_not_null(self.builder, obj)):
yield
def print_object(self, obj):
strobj = self.object_str(obj)
cstr = self.string_as_string(strobj)
fmt = self.context.insert_const_string(self.module, "%s")
self.sys_write_stdout(fmt, cstr)
self.decref(strobj)
def print_string(self, text):
fmt = self.context.insert_const_string(self.module, text)
self.sys_write_stdout(fmt)
def get_null_object(self):
return Constant.null(self.pyobj)
def return_none(self):
none = self.make_none()
self.builder.ret(none)
def list_pack(self, items):
n = len(items)
seq = self.list_new(self.context.get_constant(types.intp, n))
with self.if_object_ok(seq):
for i in range(n):
idx = self.context.get_constant(types.intp, i)
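                # list_setitem (PyList_SetItem) steals a reference, so take
                # an extra one first to keep the item alive for the caller.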
self.incref(items[i])
self.list_setitem(seq, idx, items[i])
return seq
def unserialize(self, structptr):
"""
Unserialize some data. *structptr* should be a pointer to
a {i8* data, i32 length} structure.
"""
fnty = Type.function(self.pyobj,
(self.voidptr, ir.IntType(32), self.voidptr))
fn = self._get_function(fnty, name="numba_unpickle")
ptr = self.builder.extract_value(self.builder.load(structptr), 0)
n = self.builder.extract_value(self.builder.load(structptr), 1)
hashed = self.builder.extract_value(self.builder.load(structptr), 2)
return self.builder.call(fn, (ptr, n, hashed))
def serialize_uncached(self, obj):
"""
Same as serialize_object(), but don't create a global variable,
simply return a literal {i8* data, i32 length, i8* hashbuf} structure.
"""
# First make the array constant
data = serialize.dumps(obj)
assert len(data) < 2**31
name = ".const.pickledata.%s" % (id(obj) if config.DIFF_IR == 0 else "DIFF_IR")
bdata = cgutils.make_bytearray(data)
# Make SHA1 hash on the pickled content
# NOTE: update buffer size in numba_unpickle() when changing the
# hash algorithm.
hashed = cgutils.make_bytearray(hashlib.sha1(data).digest())
arr = self.context.insert_unique_const(self.module, name, bdata)
hasharr = self.context.insert_unique_const(
self.module, f"{name}.sha1", hashed,
)
# Then populate the structure constant
struct = ir.Constant.literal_struct([
arr.bitcast(self.voidptr),
ir.Constant(ir.IntType(32), arr.type.pointee.count),
hasharr.bitcast(self.voidptr),
])
return struct
def serialize_object(self, obj):
"""
Serialize the given object in the bitcode, and return it
as a pointer to a {i8* data, i32 length}, structure constant
(suitable for passing to unserialize()).
"""
try:
gv = self.module.__serialized[obj]
except KeyError:
struct = self.serialize_uncached(obj)
name = ".const.picklebuf.%s" % (id(obj) if config.DIFF_IR == 0 else "DIFF_IR")
gv = self.context.insert_unique_const(self.module, name, struct)
# Make the id() (and hence the name) unique while populating the module.
self.module.__serialized[obj] = gv
return gv
def c_api_error(self):
return cgutils.is_not_null(self.builder, self.err_occurred())
def to_native_value(self, typ, obj):
"""
Unbox the Python object as the given Numba type.
A NativeValue instance is returned.
"""
from numba.core.boxing import unbox_unsupported
impl = _unboxers.lookup(typ.__class__, unbox_unsupported)
c = _UnboxContext(self.context, self.builder, self)
return impl(typ, obj, c)
def from_native_return(self, typ, val, env_manager):
assert not isinstance(typ, types.Optional), "callconv should have " \
"prevented the return of " \
"optional value"
out = self.from_native_value(typ, val, env_manager)
return out
def from_native_value(self, typ, val, env_manager=None):
"""
Box the native value of the given Numba type. A Python object
pointer is returned (NULL if an error occurred).
This method steals any native (NRT) reference embedded in *val*.
"""
from numba.core.boxing import box_unsupported
impl = _boxers.lookup(typ.__class__, box_unsupported)
c = _BoxContext(self.context, self.builder, self, env_manager)
return impl(typ, val, c)
def reflect_native_value(self, typ, val, env_manager=None):
"""
Reflect the native value onto its Python original, if any.
An error bit (as an LLVM value) is returned.
"""
impl = _reflectors.lookup(typ.__class__)
if impl is None:
# Reflection isn't needed for most types
return cgutils.false_bit
is_error = cgutils.alloca_once_value(self.builder, cgutils.false_bit)
c = _ReflectContext(self.context, self.builder, self, env_manager,
is_error)
impl(typ, val, c)
return self.builder.load(c.is_error)
def to_native_generator(self, obj, typ):
"""
Extract the generator structure pointer from a generator *obj*
(a _dynfunc.Generator instance).
"""
gen_ptr_ty = Type.pointer(self.context.get_data_type(typ))
value = self.context.get_generator_state(self.builder, obj, gen_ptr_ty)
return NativeValue(value)
def from_native_generator(self, val, typ, env=None):
"""
Make a Numba generator (a _dynfunc.Generator instance) from a
generator structure pointer *val*.
*env* is an optional _dynfunc.Environment instance to be wrapped
in the generator.
"""
llty = self.context.get_data_type(typ)
assert not llty.is_pointer
gen_struct_size = self.context.get_abi_sizeof(llty)
gendesc = self.context.get_generator_desc(typ)
# This is the PyCFunctionWithKeywords generated by PyCallWrapper
genfnty = Type.function(self.pyobj, [self.pyobj, self.pyobj, self.pyobj])
genfn = self._get_function(genfnty, name=gendesc.llvm_cpython_wrapper_name)
# This is the raw finalizer generated by _lower_generator_finalize_func()
finalizerty = Type.function(Type.void(), [self.voidptr])
if typ.has_finalizer:
finalizer = self._get_function(finalizerty, name=gendesc.llvm_finalizer_name)
else:
finalizer = Constant.null(Type.pointer(finalizerty))
# PyObject *numba_make_generator(state_size, initial_state, nextfunc, finalizer, env)
fnty = Type.function(self.pyobj, [self.py_ssize_t,
self.voidptr,
Type.pointer(genfnty),
Type.pointer(finalizerty),
self.voidptr])
fn = self._get_function(fnty, name="numba_make_generator")
state_size = ir.Constant(self.py_ssize_t, gen_struct_size)
initial_state = self.builder.bitcast(val, self.voidptr)
if env is None:
env = self.get_null_object()
env = self.builder.bitcast(env, self.voidptr)
return self.builder.call(fn,
(state_size, initial_state, genfn, finalizer, env))
def numba_array_adaptor(self, ary, ptr):
assert not self.context.enable_nrt
fnty = Type.function(Type.int(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="numba_adapt_ndarray")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (ary, ptr))
def numba_buffer_adaptor(self, buf, ptr):
fnty = Type.function(Type.void(),
[ir.PointerType(self.py_buffer_t), self.voidptr])
fn = self._get_function(fnty, name="numba_adapt_buffer")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (buf, ptr))
def complex_adaptor(self, cobj, cmplx):
fnty = Type.function(Type.int(), [self.pyobj, cmplx.type])
fn = self._get_function(fnty, name="numba_complex_adaptor")
return self.builder.call(fn, [cobj, cmplx])
def extract_record_data(self, obj, pbuf):
fnty = Type.function(self.voidptr,
[self.pyobj, ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_extract_record_data")
return self.builder.call(fn, [obj, pbuf])
def get_buffer(self, obj, pbuf):
fnty = Type.function(Type.int(),
[self.pyobj, ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_get_buffer")
return self.builder.call(fn, [obj, pbuf])
def release_buffer(self, pbuf):
fnty = Type.function(Type.void(), [ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_release_buffer")
return self.builder.call(fn, [pbuf])
def extract_np_datetime(self, obj):
fnty = Type.function(Type.int(64), [self.pyobj])
fn = self._get_function(fnty, name="numba_extract_np_datetime")
return self.builder.call(fn, [obj])
def extract_np_timedelta(self, obj):
fnty = Type.function(Type.int(64), [self.pyobj])
fn = self._get_function(fnty, name="numba_extract_np_timedelta")
return self.builder.call(fn, [obj])
def create_np_datetime(self, val, unit_code):
unit_code = Constant.int(Type.int(), unit_code)
fnty = Type.function(self.pyobj, [Type.int(64), Type.int()])
fn = self._get_function(fnty, name="numba_create_np_datetime")
return self.builder.call(fn, [val, unit_code])
def create_np_timedelta(self, val, unit_code):
unit_code = Constant.int(Type.int(), unit_code)
fnty = Type.function(self.pyobj, [Type.int(64), Type.int()])
fn = self._get_function(fnty, name="numba_create_np_timedelta")
return self.builder.call(fn, [val, unit_code])
def recreate_record(self, pdata, size, dtype, env_manager):
fnty = Type.function(self.pyobj, [Type.pointer(Type.int(8)),
Type.int(), self.pyobj])
fn = self._get_function(fnty, name="numba_recreate_record")
dtypeaddr = env_manager.read_const(env_manager.add_const(dtype))
return self.builder.call(fn, [pdata, size, dtypeaddr])
def string_from_constant_string(self, string):
cstr = self.context.insert_const_string(self.module, string)
sz = self.context.get_constant(types.intp, len(string))
return self.string_from_string_and_size(cstr, sz)
def call_jit_code(self, func, sig, args):
"""Calls into Numba jitted code and propagate error using the Python
calling convention.
Parameters
----------
func : function
The Python function to be compiled. This function is compiled
in nopython-mode.
sig : numba.typing.Signature
The function signature for *func*.
args : Sequence[llvmlite.binding.Value]
LLVM values to use as arguments.
Returns
-------
(is_error, res) : 2-tuple of llvmlite.binding.Value.
is_error : true iff *func* raised an exception.
res : Returned value from *func* iff *is_error* is false.
If *is_error* is true, this method will adapt the nopython exception
into a Python exception. Caller should return NULL to Python to
indicate an error.
"""
# Compile *func*
builder = self.builder
cres = self.context.compile_subroutine(builder, func, sig)
got_retty = cres.signature.return_type
retty = sig.return_type
if got_retty != retty:
# This error indicates an error in *func* or the caller of this
# method.
raise errors.LoweringError(
f'mismatching signature {got_retty} != {retty}.\n'
)
# Call into *func*
status, res = self.context.call_internal_no_propagate(
builder, cres.fndesc, sig, args,
)
# Post-call handling for *func*
is_error_ptr = cgutils.alloca_once(builder, cgutils.bool_t, zfill=True)
res_type = self.context.get_value_type(sig.return_type)
res_ptr = cgutils.alloca_once(builder, res_type, zfill=True)
# Handle error and adapt the nopython exception into cpython exception
with builder.if_else(status.is_error) as (has_err, no_err):
with has_err:
builder.store(status.is_error, is_error_ptr)
# Set error state in the Python interpreter
self.context.call_conv.raise_error(builder, self, status)
with no_err:
# Handle returned value
res = imputils.fix_returning_optional(
self.context, builder, sig, status, res,
)
builder.store(res, res_ptr)
is_error = builder.load(is_error_ptr)
res = builder.load(res_ptr)
return is_error, res
class ObjModeUtils:
"""Internal utils for calling objmode dispatcher from within NPM code.
"""
def __init__(self, pyapi):
self.pyapi = pyapi
def load_dispatcher(self, fnty, argtypes):
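        # The compiled objmode dispatcher is cached in an internal LLVM
        # global: the first call populates it (by unserializing and
        # compiling, or by embedding the entry point address), and later
        # calls simply reload the cached PyObject*.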
builder = self.pyapi.builder
tyctx = self.pyapi.context
m = builder.module
# Add a global variable to cache the objmode dispatcher
gv = ir.GlobalVariable(
m, self.pyapi.pyobj,
name=m.get_unique_name("cached_objmode_dispatcher"),
)
gv.initializer = gv.type.pointee(None)
gv.linkage = 'internal'
cached = builder.load(gv)
with builder.if_then(cgutils.is_null(builder, cached)):
if serialize.is_serialiable(fnty.dispatcher):
cls = type(self)
compiler = self.pyapi.unserialize(
self.pyapi.serialize_object(cls._call_objmode_dispatcher)
)
serialized_dispatcher = self.pyapi.serialize_object(
(fnty.dispatcher, tuple(argtypes)),
)
compile_args = self.pyapi.unserialize(serialized_dispatcher)
callee = self.pyapi.call_function_objargs(
compiler, [compile_args],
)
# Clean up
self.pyapi.decref(compiler)
self.pyapi.decref(compile_args)
else:
entry_pt = fnty.dispatcher.compile(tuple(argtypes))
callee = tyctx.add_dynamic_addr(
builder, id(entry_pt), info="with_objectmode",
)
# Incref the dispatcher and cache it
self.pyapi.incref(callee)
builder.store(callee, gv)
callee = builder.load(gv)
return callee
@staticmethod
def _call_objmode_dispatcher(compile_args):
dispatcher, argtypes = compile_args
entrypt = dispatcher.compile(argtypes)
return entrypt
from collections import namedtuple
import contextlib
import pickle
import hashlib
from llvmlite import ir
from llvmlite.llvmpy.core import Type, Constant
import llvmlite.llvmpy.core as lc
import ctypes
from numba import _helperlib
from numba.core import (
types, utils, config, lowering, cgutils, imputils, serialize,
)
PY_UNICODE_1BYTE_KIND = _helperlib.py_unicode_1byte_kind
PY_UNICODE_2BYTE_KIND = _helperlib.py_unicode_2byte_kind
PY_UNICODE_4BYTE_KIND = _helperlib.py_unicode_4byte_kind
PY_UNICODE_WCHAR_KIND = _helperlib.py_unicode_wchar_kind
class _Registry(object):
def __init__(self):
self.functions = {}
def register(self, typeclass):
assert issubclass(typeclass, types.Type)
def decorator(func):
if typeclass in self.functions:
raise KeyError("duplicate registration for %s" % (typeclass,))
self.functions[typeclass] = func
return func
return decorator
def lookup(self, typeclass, default=None):
assert issubclass(typeclass, types.Type)
for cls in typeclass.__mro__:
func = self.functions.get(cls)
if func is not None:
return func
return default
# Registries of boxing / unboxing implementations
_boxers = _Registry()
_unboxers = _Registry()
_reflectors = _Registry()
box = _boxers.register
unbox = _unboxers.register
reflect = _reflectors.register
class _BoxContext(namedtuple("_BoxContext",
("context", "builder", "pyapi", "env_manager"))):
"""
The facilities required by boxing implementations.
"""
__slots__ = ()
def box(self, typ, val):
return self.pyapi.from_native_value(typ, val, self.env_manager)
class _UnboxContext(namedtuple("_UnboxContext",
("context", "builder", "pyapi"))):
"""
The facilities required by unboxing implementations.
"""
__slots__ = ()
def unbox(self, typ, obj):
return self.pyapi.to_native_value(typ, obj)
class _ReflectContext(namedtuple("_ReflectContext",
("context", "builder", "pyapi", "env_manager",
"is_error"))):
"""
The facilities required by reflection implementations.
"""
__slots__ = ()
# XXX the error bit is currently unused by consumers (e.g. PyCallWrapper)
def set_error(self):
self.builder.store(self.is_error, cgutils.true_bit)
def box(self, typ, val):
return self.pyapi.from_native_value(typ, val, self.env_manager)
def reflect(self, typ, val):
return self.pyapi.reflect_native_value(typ, val, self.env_manager)
class NativeValue(object):
"""
Encapsulate the result of converting a Python object to a native value,
recording whether the conversion was successful and how to cleanup.
"""
def __init__(self, value, is_error=None, cleanup=None):
self.value = value
self.is_error = is_error if is_error is not None else cgutils.false_bit
self.cleanup = cleanup
class EnvironmentManager(object):
def __init__(self, pyapi, env, env_body, env_ptr):
assert isinstance(env, lowering.Environment)
self.pyapi = pyapi
self.env = env
self.env_body = env_body
self.env_ptr = env_ptr
def add_const(self, const):
"""
Add a constant to the environment, return its index.
"""
# All constants are frozen inside the environment
if isinstance(const, str):
const = utils.intern(const)
for index, val in enumerate(self.env.consts):
if val is const:
break
else:
index = len(self.env.consts)
self.env.consts.append(const)
return index
def read_const(self, index):
"""
Look up constant number *index* inside the environment body.
A borrowed reference is returned.
The returned LLVM value may have NULL value at runtime which indicates
an error at runtime.
"""
assert index < len(self.env.consts)
builder = self.pyapi.builder
consts = self.env_body.consts
ret = cgutils.alloca_once(builder, self.pyapi.pyobj, zfill=True)
with builder.if_else(cgutils.is_not_null(builder, consts)) as \
(br_not_null, br_null):
with br_not_null:
getitem = self.pyapi.list_getitem(consts, index)
builder.store(getitem, ret)
with br_null:
# This can happen when the Environment is accidentally released
# and has subsequently been garbage collected.
self.pyapi.err_set_string(
"PyExc_RuntimeError",
"`env.consts` is NULL in `read_const`",
)
return builder.load(ret)
_IteratorLoop = namedtuple('_IteratorLoop', ('value', 'do_break'))
class PythonAPI(object):
"""
Code generation facilities to call into the CPython C API (and related
helpers).
"""
def __init__(self, context, builder):
"""
Note: Maybe called multiple times when lowering a function
"""
from numba.core import boxing
self.context = context
self.builder = builder
self.module = builder.basic_block.function.module
# A unique mapping of serialized objects in this module
try:
self.module.__serialized
except AttributeError:
self.module.__serialized = {}
# Initialize types
self.pyobj = self.context.get_argument_type(types.pyobject)
self.pyobjptr = self.pyobj.as_pointer()
self.voidptr = Type.pointer(Type.int(8))
self.long = Type.int(ctypes.sizeof(ctypes.c_long) * 8)
self.ulong = self.long
self.longlong = Type.int(ctypes.sizeof(ctypes.c_ulonglong) * 8)
self.ulonglong = self.longlong
self.double = Type.double()
self.py_ssize_t = self.context.get_value_type(types.intp)
self.cstring = Type.pointer(Type.int(8))
self.gil_state = Type.int(_helperlib.py_gil_state_size * 8)
self.py_buffer_t = ir.ArrayType(ir.IntType(8), _helperlib.py_buffer_size)
self.py_hash_t = self.py_ssize_t
self.py_unicode_1byte_kind = _helperlib.py_unicode_1byte_kind
self.py_unicode_2byte_kind = _helperlib.py_unicode_2byte_kind
self.py_unicode_4byte_kind = _helperlib.py_unicode_4byte_kind
self.py_unicode_wchar_kind = _helperlib.py_unicode_wchar_kind
def get_env_manager(self, env, env_body, env_ptr):
return EnvironmentManager(self, env, env_body, env_ptr)
def emit_environment_sentry(self, envptr, return_pyobject=False,
debug_msg=''):
"""Emits LLVM code to ensure the `envptr` is not NULL
"""
is_null = cgutils.is_null(self.builder, envptr)
with cgutils.if_unlikely(self.builder, is_null):
if return_pyobject:
fnty = self.builder.function.type.pointee
assert fnty.return_type == self.pyobj
self.err_set_string(
"PyExc_RuntimeError", f"missing Environment: {debug_msg}",
)
self.builder.ret(self.get_null_object())
else:
self.context.call_conv.return_user_exc(
self.builder, RuntimeError,
(f"missing Environment: {debug_msg}",),
)
# ------ Python API -----
#
# Basic object API
#
def incref(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="Py_IncRef")
self.builder.call(fn, [obj])
def decref(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="Py_DecRef")
self.builder.call(fn, [obj])
def get_type(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="numba_py_type")
return self.builder.call(fn, [obj])
#
# Argument unpacking
#
def parse_tuple_and_keywords(self, args, kws, fmt, keywords, *objs):
charptr = Type.pointer(Type.int(8))
charptrary = Type.pointer(charptr)
argtypes = [self.pyobj, self.pyobj, charptr, charptrary]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_ParseTupleAndKeywords")
return self.builder.call(fn, [args, kws, fmt, keywords] + list(objs))
def parse_tuple(self, args, fmt, *objs):
charptr = Type.pointer(Type.int(8))
argtypes = [self.pyobj, charptr]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_ParseTuple")
return self.builder.call(fn, [args, fmt] + list(objs))
def unpack_tuple(self, args, name, n_min, n_max, *objs):
charptr = Type.pointer(Type.int(8))
argtypes = [self.pyobj, charptr, self.py_ssize_t, self.py_ssize_t]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_UnpackTuple")
n_min = Constant.int(self.py_ssize_t, n_min)
n_max = Constant.int(self.py_ssize_t, n_max)
if isinstance(name, str):
name = self.context.insert_const_string(self.builder.module, name)
return self.builder.call(fn, [args, name, n_min, n_max] + list(objs))
#
# Exception and errors
#
def err_occurred(self):
fnty = Type.function(self.pyobj, ())
fn = self._get_function(fnty, name="PyErr_Occurred")
return self.builder.call(fn, ())
def err_clear(self):
fnty = Type.function(Type.void(), ())
fn = self._get_function(fnty, name="PyErr_Clear")
return self.builder.call(fn, ())
def err_set_string(self, exctype, msg):
fnty = Type.function(Type.void(), [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyErr_SetString")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
if isinstance(msg, str):
msg = self.context.insert_const_string(self.module, msg)
return self.builder.call(fn, (exctype, msg))
def err_format(self, exctype, msg, *format_args):
fnty = Type.function(Type.void(), [self.pyobj, self.cstring], var_arg=True)
fn = self._get_function(fnty, name="PyErr_Format")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
if isinstance(msg, str):
msg = self.context.insert_const_string(self.module, msg)
return self.builder.call(fn, (exctype, msg) + tuple(format_args))
def raise_object(self, exc=None):
"""
Raise an arbitrary exception (type or value or (type, args)
or None - if reraising). A reference to the argument is consumed.
"""
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="numba_do_raise")
if exc is None:
exc = self.make_none()
return self.builder.call(fn, (exc,))
def err_set_object(self, exctype, excval):
fnty = Type.function(Type.void(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyErr_SetObject")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
return self.builder.call(fn, (exctype, excval))
def err_set_none(self, exctype):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="PyErr_SetNone")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
return self.builder.call(fn, (exctype,))
def err_write_unraisable(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="PyErr_WriteUnraisable")
return self.builder.call(fn, (obj,))
def err_fetch(self, pty, pval, ptb):
fnty = Type.function(Type.void(), [self.pyobjptr] * 3)
fn = self._get_function(fnty, name="PyErr_Fetch")
return self.builder.call(fn, (pty, pval, ptb))
def err_restore(self, ty, val, tb):
fnty = Type.function(Type.void(), [self.pyobj] * 3)
fn = self._get_function(fnty, name="PyErr_Restore")
return self.builder.call(fn, (ty, val, tb))
@contextlib.contextmanager
def err_push(self, keep_new=False):
"""
Temporarily push the current error indicator while the code
block is executed. If *keep_new* is True and the code block
raises a new error, the new error is kept, otherwise the old
error indicator is restored at the end of the block.
"""
pty, pval, ptb = [cgutils.alloca_once(self.builder, self.pyobj)
for i in range(3)]
self.err_fetch(pty, pval, ptb)
yield
ty = self.builder.load(pty)
val = self.builder.load(pval)
tb = self.builder.load(ptb)
if keep_new:
new_error = cgutils.is_not_null(self.builder, self.err_occurred())
with self.builder.if_else(new_error, likely=False) as (if_error, if_ok):
with if_error:
# Code block raised an error, keep it
self.decref(ty)
self.decref(val)
self.decref(tb)
with if_ok:
# Restore previous error
self.err_restore(ty, val, tb)
else:
self.err_restore(ty, val, tb)
def get_c_object(self, name):
"""
Get a Python object through its C-accessible *name*
(e.g. "PyExc_ValueError"). The underlying variable must be
a `PyObject *`, and the value of that pointer is returned.
"""
# A LLVM global variable is implicitly a pointer to the declared
# type, so fix up by using pyobj.pointee.
return self.context.get_c_value(self.builder, self.pyobj.pointee, name,
dllimport=True)
def raise_missing_global_error(self, name):
msg = "global name '%s' is not defined" % name
cstr = self.context.insert_const_string(self.module, msg)
self.err_set_string("PyExc_NameError", cstr)
def raise_missing_name_error(self, name):
msg = "name '%s' is not defined" % name
cstr = self.context.insert_const_string(self.module, msg)
self.err_set_string("PyExc_NameError", cstr)
def fatal_error(self, msg):
fnty = Type.function(Type.void(), [self.cstring])
fn = self._get_function(fnty, name="Py_FatalError")
fn.attributes.add("noreturn")
cstr = self.context.insert_const_string(self.module, msg)
self.builder.call(fn, (cstr,))
#
# Concrete dict API
#
def dict_getitem_string(self, dic, name):
"""Lookup name inside dict
Returns a borrowed reference
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyDict_GetItemString")
cstr = self.context.insert_const_string(self.module, name)
return self.builder.call(fn, [dic, cstr])
def dict_getitem(self, dic, name):
"""Lookup name inside dict
Returns a borrowed reference
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyDict_GetItem")
return self.builder.call(fn, [dic, name])
def dict_new(self, presize=0):
if presize == 0:
fnty = Type.function(self.pyobj, ())
fn = self._get_function(fnty, name="PyDict_New")
return self.builder.call(fn, ())
else:
fnty = Type.function(self.pyobj, [self.py_ssize_t])
fn = self._get_function(fnty, name="_PyDict_NewPresized")
return self.builder.call(fn,
[Constant.int(self.py_ssize_t, presize)])
def dict_setitem(self, dictobj, nameobj, valobj):
fnty = Type.function(Type.int(), (self.pyobj, self.pyobj,
self.pyobj))
fn = self._get_function(fnty, name="PyDict_SetItem")
return self.builder.call(fn, (dictobj, nameobj, valobj))
def dict_setitem_string(self, dictobj, name, valobj):
fnty = Type.function(Type.int(), (self.pyobj, self.cstring,
self.pyobj))
fn = self._get_function(fnty, name="PyDict_SetItemString")
cstr = self.context.insert_const_string(self.module, name)
return self.builder.call(fn, (dictobj, cstr, valobj))
def dict_pack(self, keyvalues):
"""
Args
-----
keyvalues: iterable of (str, llvm.Value of PyObject*)
"""
dictobj = self.dict_new()
with self.if_object_ok(dictobj):
for k, v in keyvalues:
self.dict_setitem_string(dictobj, k, v)
return dictobj
#
# Concrete number APIs
#
def float_from_double(self, fval):
fnty = Type.function(self.pyobj, [self.double])
fn = self._get_function(fnty, name="PyFloat_FromDouble")
return self.builder.call(fn, [fval])
def number_as_ssize_t(self, numobj):
fnty = Type.function(self.py_ssize_t, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyNumber_AsSsize_t")
# We don't want any clipping, so pass OverflowError as the 2nd arg
exc_class = self.get_c_object("PyExc_OverflowError")
return self.builder.call(fn, [numobj, exc_class])
def number_long(self, numobj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Long")
return self.builder.call(fn, [numobj])
def long_as_ulonglong(self, numobj):
fnty = Type.function(self.ulonglong, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsUnsignedLongLong")
return self.builder.call(fn, [numobj])
def long_as_longlong(self, numobj):
fnty = Type.function(self.ulonglong, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsLongLong")
return self.builder.call(fn, [numobj])
def long_as_voidptr(self, numobj):
"""
Convert the given Python integer to a void*. This is recommended
over number_as_ssize_t as it isn't affected by signedness.
"""
fnty = Type.function(self.voidptr, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsVoidPtr")
return self.builder.call(fn, [numobj])
def _long_from_native_int(self, ival, func_name, native_int_type,
signed):
fnty = Type.function(self.pyobj, [native_int_type])
fn = self._get_function(fnty, name=func_name)
resptr = cgutils.alloca_once(self.builder, self.pyobj)
fn = self._get_function(fnty, name=func_name)
self.builder.store(self.builder.call(fn, [ival]), resptr)
return self.builder.load(resptr)
def long_from_long(self, ival):
func_name = "PyLong_FromLong"
fnty = Type.function(self.pyobj, [self.long])
fn = self._get_function(fnty, name=func_name)
return self.builder.call(fn, [ival])
def long_from_ulong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromUnsignedLong",
self.long, signed=False)
def long_from_ssize_t(self, ival):
return self._long_from_native_int(ival, "PyLong_FromSsize_t",
self.py_ssize_t, signed=True)
def long_from_longlong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromLongLong",
self.longlong, signed=True)
def long_from_ulonglong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromUnsignedLongLong",
self.ulonglong, signed=False)
def long_from_signed_int(self, ival):
"""
Return a Python integer from any native integer value.
"""
bits = ival.type.width
if bits <= self.long.width:
return self.long_from_long(self.builder.sext(ival, self.long))
elif bits <= self.longlong.width:
return self.long_from_longlong(self.builder.sext(ival, self.longlong))
else:
raise OverflowError("integer too big (%d bits)" % (bits))
def long_from_unsigned_int(self, ival):
"""
Same as long_from_signed_int, but for unsigned values.
"""
bits = ival.type.width
if bits <= self.ulong.width:
return self.long_from_ulong(self.builder.zext(ival, self.ulong))
elif bits <= self.ulonglong.width:
return self.long_from_ulonglong(self.builder.zext(ival, self.ulonglong))
else:
raise OverflowError("integer too big (%d bits)" % (bits))
def _get_number_operator(self, name):
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyNumber_%s" % name)
return fn
def _call_number_operator(self, name, lhs, rhs, inplace=False):
if inplace:
name = "InPlace" + name
fn = self._get_number_operator(name)
return self.builder.call(fn, [lhs, rhs])
def number_add(self, lhs, rhs, inplace=False):
return self._call_number_operator("Add", lhs, rhs, inplace=inplace)
def number_subtract(self, lhs, rhs, inplace=False):
return self._call_number_operator("Subtract", lhs, rhs, inplace=inplace)
def number_multiply(self, lhs, rhs, inplace=False):
return self._call_number_operator("Multiply", lhs, rhs, inplace=inplace)
def number_truedivide(self, lhs, rhs, inplace=False):
return self._call_number_operator("TrueDivide", lhs, rhs, inplace=inplace)
def number_floordivide(self, lhs, rhs, inplace=False):
return self._call_number_operator("FloorDivide", lhs, rhs, inplace=inplace)
def number_remainder(self, lhs, rhs, inplace=False):
return self._call_number_operator("Remainder", lhs, rhs, inplace=inplace)
def number_matrix_multiply(self, lhs, rhs, inplace=False):
return self._call_number_operator("MatrixMultiply", lhs, rhs, inplace=inplace)
def number_lshift(self, lhs, rhs, inplace=False):
return self._call_number_operator("Lshift", lhs, rhs, inplace=inplace)
def number_rshift(self, lhs, rhs, inplace=False):
return self._call_number_operator("Rshift", lhs, rhs, inplace=inplace)
def number_and(self, lhs, rhs, inplace=False):
return self._call_number_operator("And", lhs, rhs, inplace=inplace)
def number_or(self, lhs, rhs, inplace=False):
return self._call_number_operator("Or", lhs, rhs, inplace=inplace)
def number_xor(self, lhs, rhs, inplace=False):
return self._call_number_operator("Xor", lhs, rhs, inplace=inplace)
def number_power(self, lhs, rhs, inplace=False):
fnty = Type.function(self.pyobj, [self.pyobj] * 3)
fname = "PyNumber_InPlacePower" if inplace else "PyNumber_Power"
fn = self._get_function(fnty, fname)
return self.builder.call(fn, [lhs, rhs, self.borrow_none()])
def number_negative(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Negative")
return self.builder.call(fn, (obj,))
def number_positive(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Positive")
return self.builder.call(fn, (obj,))
def number_float(self, val):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Float")
return self.builder.call(fn, [val])
def number_invert(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Invert")
return self.builder.call(fn, (obj,))
def float_as_double(self, fobj):
fnty = Type.function(self.double, [self.pyobj])
fn = self._get_function(fnty, name="PyFloat_AsDouble")
return self.builder.call(fn, [fobj])
def bool_from_bool(self, bval):
"""
Get a Python bool from a LLVM boolean.
"""
longval = self.builder.zext(bval, self.long)
return self.bool_from_long(longval)
def bool_from_long(self, ival):
fnty = Type.function(self.pyobj, [self.long])
fn = self._get_function(fnty, name="PyBool_FromLong")
return self.builder.call(fn, [ival])
def complex_from_doubles(self, realval, imagval):
fnty = Type.function(self.pyobj, [Type.double(), Type.double()])
fn = self._get_function(fnty, name="PyComplex_FromDoubles")
return self.builder.call(fn, [realval, imagval])
def complex_real_as_double(self, cobj):
fnty = Type.function(Type.double(), [self.pyobj])
fn = self._get_function(fnty, name="PyComplex_RealAsDouble")
return self.builder.call(fn, [cobj])
def complex_imag_as_double(self, cobj):
fnty = Type.function(Type.double(), [self.pyobj])
fn = self._get_function(fnty, name="PyComplex_ImagAsDouble")
return self.builder.call(fn, [cobj])
#
# Concrete slice API
#
def slice_as_ints(self, obj):
"""
Read the members of a slice of integers.
Returns a (ok, start, stop, step) tuple where ok is a boolean and
the following members are pointer-sized ints.
"""
pstart = cgutils.alloca_once(self.builder, self.py_ssize_t)
pstop = cgutils.alloca_once(self.builder, self.py_ssize_t)
pstep = cgutils.alloca_once(self.builder, self.py_ssize_t)
fnty = Type.function(Type.int(),
[self.pyobj] + [self.py_ssize_t.as_pointer()] * 3)
fn = self._get_function(fnty, name="numba_unpack_slice")
res = self.builder.call(fn, (obj, pstart, pstop, pstep))
start = self.builder.load(pstart)
stop = self.builder.load(pstop)
step = self.builder.load(pstep)
return cgutils.is_null(self.builder, res), start, stop, step
#
# List and sequence APIs
#
def sequence_getslice(self, obj, start, stop):
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t,
self.py_ssize_t])
fn = self._get_function(fnty, name="PySequence_GetSlice")
return self.builder.call(fn, (obj, start, stop))
def sequence_tuple(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PySequence_Tuple")
return self.builder.call(fn, [obj])
def list_new(self, szval):
fnty = Type.function(self.pyobj, [self.py_ssize_t])
fn = self._get_function(fnty, name="PyList_New")
return self.builder.call(fn, [szval])
def list_size(self, lst):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PyList_Size")
return self.builder.call(fn, [lst])
def list_append(self, lst, val):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyList_Append")
return self.builder.call(fn, [lst, val])
def list_setitem(self, lst, idx, val):
"""
Warning: Steals reference to ``val``
"""
fnty = Type.function(Type.int(), [self.pyobj, self.py_ssize_t,
self.pyobj])
fn = self._get_function(fnty, name="PyList_SetItem")
return self.builder.call(fn, [lst, idx, val])
def list_getitem(self, lst, idx):
"""
Returns a borrowed reference.
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t])
fn = self._get_function(fnty, name="PyList_GetItem")
if isinstance(idx, int):
idx = self.context.get_constant(types.intp, idx)
return self.builder.call(fn, [lst, idx])
def list_setslice(self, lst, start, stop, obj):
if obj is None:
obj = self.get_null_object()
fnty = Type.function(Type.int(), [self.pyobj, self.py_ssize_t,
self.py_ssize_t, self.pyobj])
fn = self._get_function(fnty, name="PyList_SetSlice")
return self.builder.call(fn, (lst, start, stop, obj))
#
# Concrete tuple API
#
def tuple_getitem(self, tup, idx):
"""
Borrow reference
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t])
fn = self._get_function(fnty, name="PyTuple_GetItem")
idx = self.context.get_constant(types.intp, idx)
return self.builder.call(fn, [tup, idx])
def tuple_pack(self, items):
fnty = Type.function(self.pyobj, [self.py_ssize_t], var_arg=True)
fn = self._get_function(fnty, name="PyTuple_Pack")
n = self.context.get_constant(types.intp, len(items))
args = [n]
args.extend(items)
return self.builder.call(fn, args)
def tuple_size(self, tup):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PyTuple_Size")
return self.builder.call(fn, [tup])
def tuple_new(self, count):
fnty = Type.function(self.pyobj, [Type.int()])
fn = self._get_function(fnty, name='PyTuple_New')
return self.builder.call(fn, [self.context.get_constant(types.int32,
count)])
def tuple_setitem(self, tuple_val, index, item):
"""
Steals a reference to `item`.
"""
fnty = Type.function(Type.int(), [self.pyobj, Type.int(), self.pyobj])
setitem_fn = self._get_function(fnty, name='PyTuple_SetItem')
index = self.context.get_constant(types.int32, index)
self.builder.call(setitem_fn, [tuple_val, index, item])
#
# Concrete set API
#
def set_new(self, iterable=None):
if iterable is None:
iterable = self.get_null_object()
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PySet_New")
return self.builder.call(fn, [iterable])
def set_add(self, set, value):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PySet_Add")
return self.builder.call(fn, [set, value])
def set_clear(self, set):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PySet_Clear")
return self.builder.call(fn, [set])
def set_size(self, set):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PySet_Size")
return self.builder.call(fn, [set])
def set_update(self, set, iterable):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="_PySet_Update")
return self.builder.call(fn, [set, iterable])
def set_next_entry(self, set, posptr, keyptr, hashptr):
fnty = Type.function(Type.int(),
[self.pyobj, self.py_ssize_t.as_pointer(),
self.pyobj.as_pointer(), self.py_hash_t.as_pointer()])
fn = self._get_function(fnty, name="_PySet_NextEntry")
return self.builder.call(fn, (set, posptr, keyptr, hashptr))
@contextlib.contextmanager
def set_iterate(self, set):
builder = self.builder
hashptr = cgutils.alloca_once(builder, self.py_hash_t, name="hashptr")
keyptr = cgutils.alloca_once(builder, self.pyobj, name="keyptr")
posptr = cgutils.alloca_once_value(builder,
ir.Constant(self.py_ssize_t, 0),
name="posptr")
bb_body = builder.append_basic_block("bb_body")
bb_end = builder.append_basic_block("bb_end")
builder.branch(bb_body)
def do_break():
builder.branch(bb_end)
with builder.goto_block(bb_body):
r = self.set_next_entry(set, posptr, keyptr, hashptr)
finished = cgutils.is_null(builder, r)
with builder.if_then(finished, likely=False):
builder.branch(bb_end)
yield _IteratorLoop(builder.load(keyptr), do_break)
builder.branch(bb_body)
builder.position_at_end(bb_end)
#
# GIL APIs
#
def gil_ensure(self):
"""
Ensure the GIL is acquired.
The returned value must be consumed by gil_release().
"""
gilptrty = Type.pointer(self.gil_state)
fnty = Type.function(Type.void(), [gilptrty])
fn = self._get_function(fnty, "numba_gil_ensure")
gilptr = cgutils.alloca_once(self.builder, self.gil_state)
self.builder.call(fn, [gilptr])
return gilptr
def gil_release(self, gil):
"""
Release the acquired GIL by gil_ensure().
Must be paired with a gil_ensure().
"""
gilptrty = Type.pointer(self.gil_state)
fnty = Type.function(Type.void(), [gilptrty])
fn = self._get_function(fnty, "numba_gil_release")
return self.builder.call(fn, [gil])
def save_thread(self):
"""
Release the GIL and return the former thread state
(an opaque non-NULL pointer).
"""
fnty = Type.function(self.voidptr, [])
fn = self._get_function(fnty, name="PyEval_SaveThread")
return self.builder.call(fn, [])
def restore_thread(self, thread_state):
"""
Restore the given thread state by reacquiring the GIL.
"""
fnty = Type.function(Type.void(), [self.voidptr])
fn = self._get_function(fnty, name="PyEval_RestoreThread")
self.builder.call(fn, [thread_state])
#
# Generic object private data (a way of associating an arbitrary void *
# pointer to an arbitrary Python object).
#
def object_get_private_data(self, obj):
fnty = Type.function(self.voidptr, [self.pyobj])
fn = self._get_function(fnty, name="numba_get_pyobject_private_data")
return self.builder.call(fn, (obj,))
def object_set_private_data(self, obj, ptr):
fnty = Type.function(Type.void(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="numba_set_pyobject_private_data")
return self.builder.call(fn, (obj, ptr))
def object_reset_private_data(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="numba_reset_pyobject_private_data")
return self.builder.call(fn, (obj,))
#
# Other APIs (organize them better!)
#
def import_module_noblock(self, modname):
fnty = Type.function(self.pyobj, [self.cstring])
fn = self._get_function(fnty, name="PyImport_ImportModuleNoBlock")
return self.builder.call(fn, [modname])
def call_function_objargs(self, callee, objargs):
fnty = Type.function(self.pyobj, [self.pyobj], var_arg=True)
fn = self._get_function(fnty, name="PyObject_CallFunctionObjArgs")
args = [callee] + list(objargs)
args.append(self.context.get_constant_null(types.pyobject))
return self.builder.call(fn, args)
def call_method(self, callee, method, objargs=()):
cname = self.context.insert_const_string(self.module, method)
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring, self.cstring],
var_arg=True)
fn = self._get_function(fnty, name="PyObject_CallMethod")
fmt = 'O' * len(objargs)
cfmt = self.context.insert_const_string(self.module, fmt)
args = [callee, cname, cfmt]
if objargs:
args.extend(objargs)
args.append(self.context.get_constant_null(types.pyobject))
return self.builder.call(fn, args)
def call(self, callee, args=None, kws=None):
if args is None:
args = self.get_null_object()
if kws is None:
kws = self.get_null_object()
fnty = Type.function(self.pyobj, [self.pyobj] * 3)
fn = self._get_function(fnty, name="PyObject_Call")
return self.builder.call(fn, (callee, args, kws))
def object_istrue(self, obj):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PyObject_IsTrue")
return self.builder.call(fn, [obj])
def object_not(self, obj):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PyObject_Not")
return self.builder.call(fn, [obj])
def object_richcompare(self, lhs, rhs, opstr):
"""
Refer to the Python source, Include/object.h, for the macro
definitions of the opid values.
"""
ops = ['<', '<=', '==', '!=', '>', '>=']
if opstr in ops:
opid = ops.index(opstr)
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj, Type.int()])
fn = self._get_function(fnty, name="PyObject_RichCompare")
lopid = self.context.get_constant(types.int32, opid)
return self.builder.call(fn, (lhs, rhs, lopid))
elif opstr == 'is':
bitflag = self.builder.icmp(lc.ICMP_EQ, lhs, rhs)
return self.bool_from_bool(bitflag)
elif opstr == 'is not':
bitflag = self.builder.icmp(lc.ICMP_NE, lhs, rhs)
return self.bool_from_bool(bitflag)
elif opstr in ('in', 'not in'):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PySequence_Contains")
status = self.builder.call(fn, (rhs, lhs))
negone = self.context.get_constant(types.int32, -1)
is_good = self.builder.icmp(lc.ICMP_NE, status, negone)
# Stack allocate output and initialize to Null
outptr = cgutils.alloca_once_value(self.builder,
Constant.null(self.pyobj))
# If PySequence_Contains returns non-error value
with cgutils.if_likely(self.builder, is_good):
if opstr == 'not in':
status = self.builder.not_(status)
# Store the status as a boolean object
truncated = self.builder.trunc(status, Type.int(1))
self.builder.store(self.bool_from_bool(truncated),
outptr)
return self.builder.load(outptr)
else:
raise NotImplementedError("Unknown operator {op!r}".format(
op=opstr))
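# Illustrative sketch (assumed names, exposition only): a rich comparison
# returns a new PyObject* reference, or NULL if an exception was raised:
#
#   res = pyapi.object_richcompare(lhs, rhs, '<')
#   with pyapi.if_object_ok(res):
#       ...  # use res; remember to decref when done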
def iter_next(self, iterobj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyIter_Next")
return self.builder.call(fn, [iterobj])
def object_getiter(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetIter")
return self.builder.call(fn, [obj])
def object_getattr_string(self, obj, attr):
cstr = self.context.insert_const_string(self.module, attr)
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyObject_GetAttrString")
return self.builder.call(fn, [obj, cstr])
def object_getattr(self, obj, attr):
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetAttr")
return self.builder.call(fn, [obj, attr])
def object_setattr_string(self, obj, attr, val):
cstr = self.context.insert_const_string(self.module, attr)
fnty = Type.function(Type.int(), [self.pyobj, self.cstring, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetAttrString")
return self.builder.call(fn, [obj, cstr, val])
def object_setattr(self, obj, attr, val):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetAttr")
return self.builder.call(fn, [obj, attr, val])
def object_delattr_string(self, obj, attr):
# PyObject_DelAttrString() is actually a C macro calling
# PyObject_SetAttrString() with value == NULL.
return self.object_setattr_string(obj, attr, self.get_null_object())
def object_delattr(self, obj, attr):
# PyObject_DelAttr() is actually a C macro calling
# PyObject_SetAttr() with value == NULL.
return self.object_setattr(obj, attr, self.get_null_object())
def object_getitem(self, obj, key):
"""
Return obj[key]
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetItem")
return self.builder.call(fn, (obj, key))
def object_setitem(self, obj, key, val):
"""
obj[key] = val
"""
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetItem")
return self.builder.call(fn, (obj, key, val))
def object_delitem(self, obj, key):
"""
del obj[key]
"""
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_DelItem")
return self.builder.call(fn, (obj, key))
def string_as_string(self, strobj):
fnty = Type.function(self.cstring, [self.pyobj])
fname = "PyUnicode_AsUTF8"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [strobj])
def string_as_string_and_size(self, strobj):
"""
Returns a tuple of ``(ok, buffer, length)``.
The ``ok`` is an i1 value that is set on success.
The ``buffer`` is an i8* pointing to the output buffer.
The ``length`` is an i32/i64 (py_ssize_t) holding the buffer length.
"""
p_length = cgutils.alloca_once(self.builder, self.py_ssize_t)
fnty = Type.function(self.cstring, [self.pyobj,
self.py_ssize_t.as_pointer()])
fname = "PyUnicode_AsUTF8AndSize"
fn = self._get_function(fnty, name=fname)
buffer = self.builder.call(fn, [strobj, p_length])
ok = self.builder.icmp_unsigned('!=',
ir.Constant(buffer.type, None),
buffer)
return (ok, buffer, self.builder.load(p_length))
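# Illustrative sketch (assumed names, exposition only): the `ok` flag must be
# checked before using the buffer, since the CPython call returns NULL on
# error:
#
#   ok, buf, n = pyapi.string_as_string_and_size(strobj)
#   with cgutils.if_likely(pyapi.builder, ok):
#       ...  # buf is i8*, n is py_ssize_t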
def string_as_string_size_and_kind(self, strobj):
"""
Returns a tuple of ``(ok, buffer, length, kind, is_ascii, hash)``.
The ``ok`` is an i1 value that is set on success.
The ``buffer`` is an i8* pointing to the output buffer.
The ``length`` is an i32/i64 (py_ssize_t) holding the buffer length.
The ``kind`` is an i32 (int32) holding the Unicode kind constant.
The ``is_ascii`` is an i32 (int32) flag, nonzero for ASCII strings.
The ``hash`` is an i32/i64 (py_hash_t) holding the precomputed hash of the string.
"""
p_length = cgutils.alloca_once(self.builder, self.py_ssize_t)
p_kind = cgutils.alloca_once(self.builder, Type.int())
p_ascii = cgutils.alloca_once(self.builder, Type.int())
p_hash = cgutils.alloca_once(self.builder, self.py_hash_t)
fnty = Type.function(self.cstring, [self.pyobj,
self.py_ssize_t.as_pointer(),
Type.int().as_pointer(),
Type.int().as_pointer(),
self.py_hash_t.as_pointer()])
fname = "numba_extract_unicode"
fn = self._get_function(fnty, name=fname)
buffer = self.builder.call(
fn, [strobj, p_length, p_kind, p_ascii, p_hash])
ok = self.builder.icmp_unsigned('!=',
ir.Constant(buffer.type, None),
buffer)
return (ok, buffer, self.builder.load(p_length),
self.builder.load(p_kind), self.builder.load(p_ascii),
self.builder.load(p_hash))
def string_from_string_and_size(self, string, size):
fnty = Type.function(self.pyobj, [self.cstring, self.py_ssize_t])
fname = "PyString_FromStringAndSize"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string, size])
def string_from_string(self, string):
fnty = Type.function(self.pyobj, [self.cstring])
fname = "PyUnicode_FromString"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string])
def string_from_kind_and_data(self, kind, string, size):
fnty = Type.function(self.pyobj, [Type.int(), self.cstring, self.py_ssize_t])
fname = "PyUnicode_FromKindAndData"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [kind, string, size])
def bytes_from_string_and_size(self, string, size):
fnty = Type.function(self.pyobj, [self.cstring, self.py_ssize_t])
fname = "PyBytes_FromStringAndSize"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string, size])
def object_hash(self, obj):
fnty = Type.function(self.py_hash_t, [self.pyobj,])
fname = "PyObject_Hash"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [obj,])
def object_str(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyObject_Str")
return self.builder.call(fn, [obj])
def make_none(self):
obj = self.borrow_none()
self.incref(obj)
return obj
def borrow_none(self):
return self.get_c_object("_Py_NoneStruct")
def sys_write_stdout(self, fmt, *args):
fnty = Type.function(Type.void(), [self.cstring], var_arg=True)
fn = self._get_function(fnty, name="PySys_FormatStdout")
return self.builder.call(fn, (fmt,) + args)
def object_dump(self, obj):
"""
Dump a Python object on C stderr. For debugging purposes.
"""
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="_PyObject_Dump")
return self.builder.call(fn, (obj,))
#
# NRT (Numba runtime) APIs
#
def nrt_adapt_ndarray_to_python(self, aryty, ary, dtypeptr):
assert self.context.enable_nrt, "NRT required"
intty = ir.IntType(32)
fnty = Type.function(self.pyobj,
[self.voidptr, intty, intty, self.pyobj])
fn = self._get_function(fnty, name="NRT_adapt_ndarray_to_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
ndim = self.context.get_constant(types.int32, aryty.ndim)
writable = self.context.get_constant(types.int32, int(aryty.mutable))
aryptr = cgutils.alloca_once_value(self.builder, ary)
return self.builder.call(fn, [self.builder.bitcast(aryptr,
self.voidptr),
ndim, writable, dtypeptr])
def nrt_meminfo_new_from_pyobject(self, data, pyobj):
"""
Allocate a new MemInfo with a data payload borrowed from a Python
object.
"""
mod = self.builder.module
fnty = ir.FunctionType(
cgutils.voidptr_t,
[cgutils.voidptr_t, cgutils.voidptr_t],
)
fn = mod.get_or_insert_function(
fnty,
name="NRT_meminfo_new_from_pyobject",
)
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [data, pyobj])
def nrt_meminfo_as_pyobject(self, miptr):
mod = self.builder.module
fnty = ir.FunctionType(
self.pyobj,
[cgutils.voidptr_t]
)
fn = mod.get_or_insert_function(
fnty,
name='NRT_meminfo_as_pyobject',
)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [miptr])
def nrt_meminfo_from_pyobject(self, miobj):
mod = self.builder.module
fnty = ir.FunctionType(
cgutils.voidptr_t,
[self.pyobj]
)
fn = mod.get_or_insert_function(
fnty,
name='NRT_meminfo_from_pyobject',
)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [miobj])
def nrt_adapt_ndarray_from_python(self, ary, ptr):
assert self.context.enable_nrt
fnty = Type.function(Type.int(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="NRT_adapt_ndarray_from_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (ary, ptr))
def nrt_adapt_buffer_from_python(self, buf, ptr):
assert self.context.enable_nrt
fnty = Type.function(Type.void(), [Type.pointer(self.py_buffer_t),
self.voidptr])
fn = self._get_function(fnty, name="NRT_adapt_buffer_from_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (buf, ptr))
# ------ utils -----
def _get_function(self, fnty, name):
return self.module.get_or_insert_function(fnty, name=name)
def alloca_obj(self):
return self.builder.alloca(self.pyobj)
def alloca_buffer(self):
"""
Return a pointer to a stack-allocated, zero-initialized Py_buffer.
"""
# Treat the buffer as an opaque array of bytes
ptr = cgutils.alloca_once_value(self.builder,
lc.Constant.null(self.py_buffer_t))
return ptr
@contextlib.contextmanager
def if_object_ok(self, obj):
with cgutils.if_likely(self.builder,
cgutils.is_not_null(self.builder, obj)):
yield
def print_object(self, obj):
strobj = self.object_str(obj)
cstr = self.string_as_string(strobj)
fmt = self.context.insert_const_string(self.module, "%s")
self.sys_write_stdout(fmt, cstr)
self.decref(strobj)
def print_string(self, text):
fmt = self.context.insert_const_string(self.module, text)
self.sys_write_stdout(fmt)
def get_null_object(self):
return Constant.null(self.pyobj)
def return_none(self):
none = self.make_none()
self.builder.ret(none)
def list_pack(self, items):
n = len(items)
seq = self.list_new(self.context.get_constant(types.intp, n))
with self.if_object_ok(seq):
for i in range(n):
idx = self.context.get_constant(types.intp, i)
self.incref(items[i])
self.list_setitem(seq, idx, items[i])
return seq
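# Note (exposition): PyList_SetItem steals a reference, which is why
# list_pack() increfs each item above; the caller keeps its own references.
# A minimal sketch with assumed names:
#
#   lst = pyapi.list_pack([a, b])   # a, b: LLVM PyObject* values
#   # a and b are still owned by the caller afterwards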
def unserialize(self, structptr):
"""
Unserialize some data. *structptr* should be a pointer to
a {i8* data, i32 length, i8* hashbuf} structure.
"""
fnty = Type.function(self.pyobj,
(self.voidptr, ir.IntType(32), self.voidptr))
fn = self._get_function(fnty, name="numba_unpickle")
ptr = self.builder.extract_value(self.builder.load(structptr), 0)
n = self.builder.extract_value(self.builder.load(structptr), 1)
hashed = self.builder.extract_value(self.builder.load(structptr), 2)
return self.builder.call(fn, (ptr, n, hashed))
def serialize_uncached(self, obj):
"""
Same as serialize_object(), but don't create a global variable,
simply return a literal {i8* data, i32 length, i8* hashbuf} structure.
"""
# First make the array constant
data = serialize.dumps(obj)
assert len(data) < 2**31
name = ".const.pickledata.%s" % (id(obj) if config.DIFF_IR == 0 else "DIFF_IR")
bdata = cgutils.make_bytearray(data)
# Make SHA1 hash on the pickled content
# NOTE: update buffer size in numba_unpickle() when changing the
# hash algorithm.
hashed = cgutils.make_bytearray(hashlib.sha1(data).digest())
arr = self.context.insert_unique_const(self.module, name, bdata)
hasharr = self.context.insert_unique_const(
self.module, f"{name}.sha1", hashed,
)
# Then populate the structure constant
struct = ir.Constant.literal_struct([
arr.bitcast(self.voidptr),
ir.Constant(ir.IntType(32), arr.type.pointee.count),
hasharr.bitcast(self.voidptr),
])
return struct
def serialize_object(self, obj):
"""
Serialize the given object in the bitcode, and return it
as a pointer to a {i8* data, i32 length, i8* hashbuf} structure
constant (suitable for passing to unserialize()).
"""
try:
gv = self.module.__serialized[obj]
except KeyError:
struct = self.serialize_uncached(obj)
name = ".const.picklebuf.%s" % (id(obj) if config.DIFF_IR == 0 else "DIFF_IR")
gv = self.context.insert_unique_const(self.module, name, struct)
# Make the id() (and hence the name) unique while populating the module.
self.module.__serialized[obj] = gv
return gv
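# Illustrative round-trip sketch (assumed names, exposition only):
# serialize_object() embeds the pickled bytes (plus a SHA1 digest checked by
# numba_unpickle) in the module; unserialize() rebuilds the object at run
# time:
#
#   gv = pyapi.serialize_object(("some", "constant"))
#   obj = pyapi.unserialize(gv)     # PyObject* or NULL on failure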
def c_api_error(self):
return cgutils.is_not_null(self.builder, self.err_occurred())
def to_native_value(self, typ, obj):
"""
Unbox the Python object as the given Numba type.
A NativeValue instance is returned.
"""
from numba.core.boxing import unbox_unsupported
impl = _unboxers.lookup(typ.__class__, unbox_unsupported)
c = _UnboxContext(self.context, self.builder, self)
return impl(typ, obj, c)
def from_native_return(self, typ, val, env_manager):
assert not isinstance(typ, types.Optional), "callconv should have " \
"prevented the return of " \
"optional value"
out = self.from_native_value(typ, val, env_manager)
return out
def from_native_value(self, typ, val, env_manager=None):
"""
Box the native value of the given Numba type. A Python object
pointer is returned (NULL if an error occurred).
This method steals any native (NRT) reference embedded in *val*.
"""
from numba.core.boxing import box_unsupported
impl = _boxers.lookup(typ.__class__, box_unsupported)
c = _BoxContext(self.context, self.builder, self, env_manager)
return impl(typ, val, c)
def reflect_native_value(self, typ, val, env_manager=None):
"""
Reflect the native value onto its Python original, if any.
An error bit (as an LLVM value) is returned.
"""
impl = _reflectors.lookup(typ.__class__)
if impl is None:
# Reflection isn't needed for most types
return cgutils.false_bit
is_error = cgutils.alloca_once_value(self.builder, cgutils.false_bit)
c = _ReflectContext(self.context, self.builder, self, env_manager,
is_error)
impl(typ, val, c)
return self.builder.load(c.is_error)
def to_native_generator(self, obj, typ):
"""
Extract the generator structure pointer from a generator *obj*
(a _dynfunc.Generator instance).
"""
gen_ptr_ty = Type.pointer(self.context.get_data_type(typ))
value = self.context.get_generator_state(self.builder, obj, gen_ptr_ty)
return NativeValue(value)
def from_native_generator(self, val, typ, env=None):
"""
Make a Numba generator (a _dynfunc.Generator instance) from a
generator structure pointer *val*.
*env* is an optional _dynfunc.Environment instance to be wrapped
in the generator.
"""
llty = self.context.get_data_type(typ)
assert not llty.is_pointer
gen_struct_size = self.context.get_abi_sizeof(llty)
gendesc = self.context.get_generator_desc(typ)
# This is the PyCFunctionWithKeywords generated by PyCallWrapper
genfnty = Type.function(self.pyobj, [self.pyobj, self.pyobj, self.pyobj])
genfn = self._get_function(genfnty, name=gendesc.llvm_cpython_wrapper_name)
# This is the raw finalizer generated by _lower_generator_finalize_func()
finalizerty = Type.function(Type.void(), [self.voidptr])
if typ.has_finalizer:
finalizer = self._get_function(finalizerty, name=gendesc.llvm_finalizer_name)
else:
finalizer = Constant.null(Type.pointer(finalizerty))
# PyObject *numba_make_generator(state_size, initial_state, nextfunc, finalizer, env)
fnty = Type.function(self.pyobj, [self.py_ssize_t,
self.voidptr,
Type.pointer(genfnty),
Type.pointer(finalizerty),
self.voidptr])
fn = self._get_function(fnty, name="numba_make_generator")
state_size = ir.Constant(self.py_ssize_t, gen_struct_size)
initial_state = self.builder.bitcast(val, self.voidptr)
if env is None:
env = self.get_null_object()
env = self.builder.bitcast(env, self.voidptr)
return self.builder.call(fn,
(state_size, initial_state, genfn, finalizer, env))
def numba_array_adaptor(self, ary, ptr):
assert not self.context.enable_nrt
fnty = Type.function(Type.int(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="numba_adapt_ndarray")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (ary, ptr))
def numba_buffer_adaptor(self, buf, ptr):
fnty = Type.function(Type.void(),
[ir.PointerType(self.py_buffer_t), self.voidptr])
fn = self._get_function(fnty, name="numba_adapt_buffer")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (buf, ptr))
def complex_adaptor(self, cobj, cmplx):
fnty = Type.function(Type.int(), [self.pyobj, cmplx.type])
fn = self._get_function(fnty, name="numba_complex_adaptor")
return self.builder.call(fn, [cobj, cmplx])
def extract_record_data(self, obj, pbuf):
fnty = Type.function(self.voidptr,
[self.pyobj, ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_extract_record_data")
return self.builder.call(fn, [obj, pbuf])
def get_buffer(self, obj, pbuf):
fnty = Type.function(Type.int(),
[self.pyobj, ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_get_buffer")
return self.builder.call(fn, [obj, pbuf])
def release_buffer(self, pbuf):
fnty = Type.function(Type.void(), [ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_release_buffer")
return self.builder.call(fn, [pbuf])
def extract_np_datetime(self, obj):
fnty = Type.function(Type.int(64), [self.pyobj])
fn = self._get_function(fnty, name="numba_extract_np_datetime")
return self.builder.call(fn, [obj])
def extract_np_timedelta(self, obj):
fnty = Type.function(Type.int(64), [self.pyobj])
fn = self._get_function(fnty, name="numba_extract_np_timedelta")
return self.builder.call(fn, [obj])
def create_np_datetime(self, val, unit_code):
unit_code = Constant.int(Type.int(), unit_code)
fnty = Type.function(self.pyobj, [Type.int(64), Type.int()])
fn = self._get_function(fnty, name="numba_create_np_datetime")
return self.builder.call(fn, [val, unit_code])
def create_np_timedelta(self, val, unit_code):
unit_code = Constant.int(Type.int(), unit_code)
fnty = Type.function(self.pyobj, [Type.int(64), Type.int()])
fn = self._get_function(fnty, name="numba_create_np_timedelta")
return self.builder.call(fn, [val, unit_code])
def recreate_record(self, pdata, size, dtype, env_manager):
fnty = Type.function(self.pyobj, [Type.pointer(Type.int(8)),
Type.int(), self.pyobj])
fn = self._get_function(fnty, name="numba_recreate_record")
dtypeaddr = env_manager.read_const(env_manager.add_const(dtype))
return self.builder.call(fn, [pdata, size, dtypeaddr])
def string_from_constant_string(self, string):
cstr = self.context.insert_const_string(self.module, string)
sz = self.context.get_constant(types.intp, len(string))
return self.string_from_string_and_size(cstr, sz)
def call_jit_code(self, func, sig, args):
"""Calls into Numba jitted code and propagate error using the Python
calling convention.
Parameters
----------
func : function
The Python function to be compiled. This function is compiled
in nopython-mode.
sig : numba.typing.Signature
The function signature for *func*.
args : Sequence[llvmlite.binding.Value]
LLVM values to use as arguments.
Returns
-------
(is_error, res) : 2-tuple of llvmlite.binding.Value.
is_error : true iff *func* raised an exception.
res : Returned value from *func* iff *is_error* is false.
If *is_error* is true, this method will adapt the nopython exception
into a Python exception. The caller should return NULL to Python to
indicate an error.
"""
# Compile *func*
builder = self.builder
cres = self.context.compile_subroutine(builder, func, sig)
got_retty = cres.signature.return_type
retty = sig.return_type
if got_retty != retty:
# This error indicates an error in *func* or the caller of this
# method.
raise errors.LoweringError(
f'mismatching signature {got_retty} != {retty}.\n'
)
# Call into *func*
status, res = self.context.call_internal_no_propagate(
builder, cres.fndesc, sig, args,
)
# Post-call handling for *func*
is_error_ptr = cgutils.alloca_once(builder, cgutils.bool_t, zfill=True)
res_type = self.context.get_value_type(sig.return_type)
res_ptr = cgutils.alloca_once(builder, res_type, zfill=True)
# Handle error and adapt the nopython exception into cpython exception
with builder.if_else(status.is_error) as (has_err, no_err):
with has_err:
builder.store(status.is_error, is_error_ptr)
# Set error state in the Python interpreter
self.context.call_conv.raise_error(builder, self, status)
with no_err:
# Handle returned value
res = imputils.fix_returning_optional(
self.context, builder, sig, status, res,
)
builder.store(res, res_ptr)
is_error = builder.load(is_error_ptr)
res = builder.load(res_ptr)
return is_error, res
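# Illustrative sketch (assumed names, exposition only): calling a jitted
# subroutine from a wrapper and returning NULL to Python when it raised:
#
#   is_error, res = pyapi.call_jit_code(py_func, sig, llvm_args)
#   with builder.if_then(is_error, likely=False):
#       builder.ret(pyapi.get_null_object())    # exception is already set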
class ObjModeUtils:
"""Internal utils for calling objmode dispatcher from within NPM code.
"""
def __init__(self, pyapi):
self.pyapi = pyapi
def load_dispatcher(self, fnty, argtypes):
builder = self.pyapi.builder
tyctx = self.pyapi.context
m = builder.module
# Add a global variable to cache the objmode dispatcher
gv = ir.GlobalVariable(
m, self.pyapi.pyobj,
name=m.get_unique_name("cached_objmode_dispatcher"),
)
gv.initializer = gv.type.pointee(None)
gv.linkage = 'internal'
cached = builder.load(gv)
with builder.if_then(cgutils.is_null(builder, cached)):
if serialize.is_serialiable(fnty.dispatcher):
cls = type(self)
compiler = self.pyapi.unserialize(
self.pyapi.serialize_object(cls._call_objmode_dispatcher)
)
serialized_dispatcher = self.pyapi.serialize_object(
(fnty.dispatcher, tuple(argtypes)),
)
compile_args = self.pyapi.unserialize(serialized_dispatcher)
callee = self.pyapi.call_function_objargs(
compiler, [compile_args],
)
# Clean up
self.pyapi.decref(compiler)
self.pyapi.decref(compile_args)
else:
entry_pt = fnty.dispatcher.compile(tuple(argtypes))
callee = tyctx.add_dynamic_addr(
builder, id(entry_pt), info="with_objectmode",
)
# Incref the dispatcher and cache it
self.pyapi.incref(callee)
builder.store(callee, gv)
callee = builder.load(gv)
return callee
@staticmethod
def _call_objmode_dispatcher(compile_args):
dispatcher, argtypes = compile_args
entrypt = dispatcher.compile(argtypes)
return entrypt
|
string_as_string_size_and_kind
|
Returns a tuple of ``(ok, buffer, length, kind, is_ascii, hash)``.
The ``ok`` is an i1 value that is set on success.
The ``buffer`` is an i8* pointing to the output buffer.
The ``length`` is an i32/i64 (py_ssize_t) holding the buffer length.
The ``kind`` is an i32 (int32) holding the Unicode kind constant.
The ``is_ascii`` is an i32 (int32) flag, nonzero for ASCII strings.
The ``hash`` is an i32/i64 (py_hash_t) holding the precomputed hash of the string.
|
from collections import namedtuple
import contextlib
import pickle
import hashlib
from llvmlite import ir
from llvmlite.llvmpy.core import Type, Constant
import llvmlite.llvmpy.core as lc
import ctypes
from numba import _helperlib
from numba.core import (
types, utils, config, lowering, cgutils, imputils, serialize,
)
PY_UNICODE_1BYTE_KIND = _helperlib.py_unicode_1byte_kind
PY_UNICODE_2BYTE_KIND = _helperlib.py_unicode_2byte_kind
PY_UNICODE_4BYTE_KIND = _helperlib.py_unicode_4byte_kind
PY_UNICODE_WCHAR_KIND = _helperlib.py_unicode_wchar_kind
class _Registry(object):
def __init__(self):
self.functions = {}
def register(self, typeclass):
assert issubclass(typeclass, types.Type)
def decorator(func):
if typeclass in self.functions:
raise KeyError("duplicate registration for %s" % (typeclass,))
self.functions[typeclass] = func
return func
return decorator
def lookup(self, typeclass, default=None):
assert issubclass(typeclass, types.Type)
for cls in typeclass.__mro__:
func = self.functions.get(cls)
if func is not None:
return func
return default
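# Illustrative sketch (exposition only): the registries defined just below
# map a Numba type class to a handler; lookup() walks the class MRO, so a
# handler registered for a base class also serves its subclasses:
#
#   @_boxers.register(types.Integer)
#   def box_integer(typ, val, c):
#       ...
#   impl = _boxers.lookup(types.Integer)   # also found for any subclass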
# Registries of boxing / unboxing implementations
_boxers = _Registry()
_unboxers = _Registry()
_reflectors = _Registry()
box = _boxers.register
unbox = _unboxers.register
reflect = _reflectors.register
class _BoxContext(namedtuple("_BoxContext",
("context", "builder", "pyapi", "env_manager"))):
"""
The facilities required by boxing implementations.
"""
__slots__ = ()
def box(self, typ, val):
return self.pyapi.from_native_value(typ, val, self.env_manager)
class _UnboxContext(namedtuple("_UnboxContext",
("context", "builder", "pyapi"))):
"""
The facilities required by unboxing implementations.
"""
__slots__ = ()
def unbox(self, typ, obj):
return self.pyapi.to_native_value(typ, obj)
class _ReflectContext(namedtuple("_ReflectContext",
("context", "builder", "pyapi", "env_manager",
"is_error"))):
"""
The facilities required by reflection implementations.
"""
__slots__ = ()
# XXX the error bit is currently unused by consumers (e.g. PyCallWrapper)
def set_error(self):
self.builder.store(cgutils.true_bit, self.is_error)
def box(self, typ, val):
return self.pyapi.from_native_value(typ, val, self.env_manager)
def reflect(self, typ, val):
return self.pyapi.reflect_native_value(typ, val, self.env_manager)
class NativeValue(object):
"""
Encapsulate the result of converting a Python object to a native value,
recording whether the conversion was successful and how to clean up.
"""
def __init__(self, value, is_error=None, cleanup=None):
self.value = value
self.is_error = is_error if is_error is not None else cgutils.false_bit
self.cleanup = cleanup
class EnvironmentManager(object):
def __init__(self, pyapi, env, env_body, env_ptr):
assert isinstance(env, lowering.Environment)
self.pyapi = pyapi
self.env = env
self.env_body = env_body
self.env_ptr = env_ptr
def add_const(self, const):
"""
Add a constant to the environment, return its index.
"""
# All constants are frozen inside the environment
if isinstance(const, str):
const = utils.intern(const)
for index, val in enumerate(self.env.consts):
if val is const:
break
else:
index = len(self.env.consts)
self.env.consts.append(const)
return index
def read_const(self, index):
"""
Look up constant number *index* inside the environment body.
A borrowed reference is returned.
The returned LLVM value may be NULL at runtime, which indicates
an error occurred.
"""
assert index < len(self.env.consts)
builder = self.pyapi.builder
consts = self.env_body.consts
ret = cgutils.alloca_once(builder, self.pyapi.pyobj, zfill=True)
with builder.if_else(cgutils.is_not_null(builder, consts)) as \
(br_not_null, br_null):
with br_not_null:
getitem = self.pyapi.list_getitem(consts, index)
builder.store(getitem, ret)
with br_null:
# This can happen when the Environment is accidentally released
# and has subsequently been garbage collected.
self.pyapi.err_set_string(
"PyExc_RuntimeError",
"`env.consts` is NULL in `read_const`",
)
return builder.load(ret)
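# Illustrative sketch (assumed names, exposition only): constants are
# interned on the Python side by add_const() and fetched back by index in
# generated code:
#
#   idx = env_manager.add_const("a message")    # index into env.consts
#   obj = env_manager.read_const(idx)           # borrowed PyObject*, may be
#                                               # NULL at runtime on error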
_IteratorLoop = namedtuple('_IteratorLoop', ('value', 'do_break'))
class PythonAPI(object):
"""
Code generation facilities to call into the CPython C API (and related
helpers).
"""
def __init__(self, context, builder):
"""
Note: May be called multiple times when lowering a function.
"""
from numba.core import boxing
self.context = context
self.builder = builder
self.module = builder.basic_block.function.module
# A unique mapping of serialized objects in this module
try:
self.module.__serialized
except AttributeError:
self.module.__serialized = {}
# Initialize types
self.pyobj = self.context.get_argument_type(types.pyobject)
self.pyobjptr = self.pyobj.as_pointer()
self.voidptr = Type.pointer(Type.int(8))
self.long = Type.int(ctypes.sizeof(ctypes.c_long) * 8)
self.ulong = self.long
self.longlong = Type.int(ctypes.sizeof(ctypes.c_ulonglong) * 8)
self.ulonglong = self.longlong
self.double = Type.double()
self.py_ssize_t = self.context.get_value_type(types.intp)
self.cstring = Type.pointer(Type.int(8))
self.gil_state = Type.int(_helperlib.py_gil_state_size * 8)
self.py_buffer_t = ir.ArrayType(ir.IntType(8), _helperlib.py_buffer_size)
self.py_hash_t = self.py_ssize_t
self.py_unicode_1byte_kind = _helperlib.py_unicode_1byte_kind
self.py_unicode_2byte_kind = _helperlib.py_unicode_2byte_kind
self.py_unicode_4byte_kind = _helperlib.py_unicode_4byte_kind
self.py_unicode_wchar_kind = _helperlib.py_unicode_wchar_kind
def get_env_manager(self, env, env_body, env_ptr):
return EnvironmentManager(self, env, env_body, env_ptr)
def emit_environment_sentry(self, envptr, return_pyobject=False,
debug_msg=''):
"""Emits LLVM code to ensure the `envptr` is not NULL
"""
is_null = cgutils.is_null(self.builder, envptr)
with cgutils.if_unlikely(self.builder, is_null):
if return_pyobject:
fnty = self.builder.function.type.pointee
assert fnty.return_type == self.pyobj
self.err_set_string(
"PyExc_RuntimeError", f"missing Environment: {debug_msg}",
)
self.builder.ret(self.get_null_object())
else:
self.context.call_conv.return_user_exc(
self.builder, RuntimeError,
(f"missing Environment: {debug_msg}",),
)
# ------ Python API -----
#
# Basic object API
#
def incref(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="Py_IncRef")
self.builder.call(fn, [obj])
def decref(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="Py_DecRef")
self.builder.call(fn, [obj])
def get_type(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="numba_py_type")
return self.builder.call(fn, [obj])
#
# Argument unpacking
#
def parse_tuple_and_keywords(self, args, kws, fmt, keywords, *objs):
charptr = Type.pointer(Type.int(8))
charptrary = Type.pointer(charptr)
argtypes = [self.pyobj, self.pyobj, charptr, charptrary]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_ParseTupleAndKeywords")
return self.builder.call(fn, [args, kws, fmt, keywords] + list(objs))
def parse_tuple(self, args, fmt, *objs):
charptr = Type.pointer(Type.int(8))
argtypes = [self.pyobj, charptr]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_ParseTuple")
return self.builder.call(fn, [args, fmt] + list(objs))
def unpack_tuple(self, args, name, n_min, n_max, *objs):
charptr = Type.pointer(Type.int(8))
argtypes = [self.pyobj, charptr, self.py_ssize_t, self.py_ssize_t]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_UnpackTuple")
n_min = Constant.int(self.py_ssize_t, n_min)
n_max = Constant.int(self.py_ssize_t, n_max)
if isinstance(name, str):
name = self.context.insert_const_string(self.builder.module, name)
return self.builder.call(fn, [args, name, n_min, n_max] + list(objs))
#
# Exception and errors
#
def err_occurred(self):
fnty = Type.function(self.pyobj, ())
fn = self._get_function(fnty, name="PyErr_Occurred")
return self.builder.call(fn, ())
def err_clear(self):
fnty = Type.function(Type.void(), ())
fn = self._get_function(fnty, name="PyErr_Clear")
return self.builder.call(fn, ())
def err_set_string(self, exctype, msg):
fnty = Type.function(Type.void(), [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyErr_SetString")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
if isinstance(msg, str):
msg = self.context.insert_const_string(self.module, msg)
return self.builder.call(fn, (exctype, msg))
def err_format(self, exctype, msg, *format_args):
fnty = Type.function(Type.void(), [self.pyobj, self.cstring], var_arg=True)
fn = self._get_function(fnty, name="PyErr_Format")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
if isinstance(msg, str):
msg = self.context.insert_const_string(self.module, msg)
return self.builder.call(fn, (exctype, msg) + tuple(format_args))
def raise_object(self, exc=None):
"""
Raise an arbitrary exception: an exception type, an exception value,
a (type, args) tuple, or None to re-raise. A reference to the
argument is consumed.
"""
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="numba_do_raise")
if exc is None:
exc = self.make_none()
return self.builder.call(fn, (exc,))
def err_set_object(self, exctype, excval):
fnty = Type.function(Type.void(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyErr_SetObject")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
return self.builder.call(fn, (exctype, excval))
def err_set_none(self, exctype):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="PyErr_SetNone")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
return self.builder.call(fn, (exctype,))
def err_write_unraisable(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="PyErr_WriteUnraisable")
return self.builder.call(fn, (obj,))
def err_fetch(self, pty, pval, ptb):
fnty = Type.function(Type.void(), [self.pyobjptr] * 3)
fn = self._get_function(fnty, name="PyErr_Fetch")
return self.builder.call(fn, (pty, pval, ptb))
def err_restore(self, ty, val, tb):
fnty = Type.function(Type.void(), [self.pyobj] * 3)
fn = self._get_function(fnty, name="PyErr_Restore")
return self.builder.call(fn, (ty, val, tb))
@contextlib.contextmanager
def err_push(self, keep_new=False):
"""
Temporarily save the current error indicator while the code
block executes, restoring it at the end of the block. If
*keep_new* is True and the code block raises a new error, the
new error is kept instead of the saved one.
"""
pty, pval, ptb = [cgutils.alloca_once(self.builder, self.pyobj)
for i in range(3)]
self.err_fetch(pty, pval, ptb)
yield
ty = self.builder.load(pty)
val = self.builder.load(pval)
tb = self.builder.load(ptb)
if keep_new:
new_error = cgutils.is_not_null(self.builder, self.err_occurred())
with self.builder.if_else(new_error, likely=False) as (if_error, if_ok):
with if_error:
# Code block raised an error, keep it
self.decref(ty)
self.decref(val)
self.decref(tb)
with if_ok:
# Restore previous error
self.err_restore(ty, val, tb)
else:
self.err_restore(ty, val, tb)
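# Illustrative sketch (assumed names, exposition only): err_push() protects
# cleanup code that may itself raise; with keep_new=True a fresh exception
# wins over the saved one:
#
#   with pyapi.err_push(keep_new=True):
#       pyapi.decref(obj)   # may run arbitrary __del__ code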
def get_c_object(self, name):
"""
Get a Python object through its C-accessible *name*
(e.g. "PyExc_ValueError"). The underlying variable must be
a `PyObject *`, and the value of that pointer is returned.
"""
# A LLVM global variable is implicitly a pointer to the declared
# type, so fix up by using pyobj.pointee.
return self.context.get_c_value(self.builder, self.pyobj.pointee, name,
dllimport=True)
def raise_missing_global_error(self, name):
msg = "global name '%s' is not defined" % name
cstr = self.context.insert_const_string(self.module, msg)
self.err_set_string("PyExc_NameError", cstr)
def raise_missing_name_error(self, name):
msg = "name '%s' is not defined" % name
cstr = self.context.insert_const_string(self.module, msg)
self.err_set_string("PyExc_NameError", cstr)
def fatal_error(self, msg):
fnty = Type.function(Type.void(), [self.cstring])
fn = self._get_function(fnty, name="Py_FatalError")
fn.attributes.add("noreturn")
cstr = self.context.insert_const_string(self.module, msg)
self.builder.call(fn, (cstr,))
#
# Concrete dict API
#
def dict_getitem_string(self, dic, name):
"""Lookup name inside dict
Returns a borrowed reference
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyDict_GetItemString")
cstr = self.context.insert_const_string(self.module, name)
return self.builder.call(fn, [dic, cstr])
def dict_getitem(self, dic, name):
"""Lookup name inside dict
Returns a borrowed reference
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyDict_GetItem")
return self.builder.call(fn, [dic, name])
def dict_new(self, presize=0):
if presize == 0:
fnty = Type.function(self.pyobj, ())
fn = self._get_function(fnty, name="PyDict_New")
return self.builder.call(fn, ())
else:
fnty = Type.function(self.pyobj, [self.py_ssize_t])
fn = self._get_function(fnty, name="_PyDict_NewPresized")
return self.builder.call(fn,
[Constant.int(self.py_ssize_t, presize)])
def dict_setitem(self, dictobj, nameobj, valobj):
fnty = Type.function(Type.int(), (self.pyobj, self.pyobj,
self.pyobj))
fn = self._get_function(fnty, name="PyDict_SetItem")
return self.builder.call(fn, (dictobj, nameobj, valobj))
def dict_setitem_string(self, dictobj, name, valobj):
fnty = Type.function(Type.int(), (self.pyobj, self.cstring,
self.pyobj))
fn = self._get_function(fnty, name="PyDict_SetItemString")
cstr = self.context.insert_const_string(self.module, name)
return self.builder.call(fn, (dictobj, cstr, valobj))
def dict_pack(self, keyvalues):
"""
Args
-----
keyvalues: iterable of (str, llvm.Value of PyObject*)
"""
dictobj = self.dict_new()
with self.if_object_ok(dictobj):
for k, v in keyvalues:
self.dict_setitem_string(dictobj, k, v)
return dictobj
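# Illustrative sketch (assumed names, exposition only): building a keyword
# dict for a PyObject_Call-style invocation:
#
#   kws = pyapi.dict_pack([("axis", axis_obj), ("out", out_obj)])
#   res = pyapi.call(callee, args=args_tuple, kws=kws)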
#
# Concrete number APIs
#
def float_from_double(self, fval):
fnty = Type.function(self.pyobj, [self.double])
fn = self._get_function(fnty, name="PyFloat_FromDouble")
return self.builder.call(fn, [fval])
def number_as_ssize_t(self, numobj):
fnty = Type.function(self.py_ssize_t, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyNumber_AsSsize_t")
# We don't want any clipping, so pass OverflowError as the 2nd arg
exc_class = self.get_c_object("PyExc_OverflowError")
return self.builder.call(fn, [numobj, exc_class])
def number_long(self, numobj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Long")
return self.builder.call(fn, [numobj])
def long_as_ulonglong(self, numobj):
fnty = Type.function(self.ulonglong, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsUnsignedLongLong")
return self.builder.call(fn, [numobj])
def long_as_longlong(self, numobj):
fnty = Type.function(self.longlong, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsLongLong")
return self.builder.call(fn, [numobj])
def long_as_voidptr(self, numobj):
"""
Convert the given Python integer to a void*. This is recommended
over number_as_ssize_t as it isn't affected by signedness.
"""
fnty = Type.function(self.voidptr, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsVoidPtr")
return self.builder.call(fn, [numobj])
def _long_from_native_int(self, ival, func_name, native_int_type,
signed):
fnty = Type.function(self.pyobj, [native_int_type])
fn = self._get_function(fnty, name=func_name)
resptr = cgutils.alloca_once(self.builder, self.pyobj)
self.builder.store(self.builder.call(fn, [ival]), resptr)
return self.builder.load(resptr)
def long_from_long(self, ival):
func_name = "PyLong_FromLong"
fnty = Type.function(self.pyobj, [self.long])
fn = self._get_function(fnty, name=func_name)
return self.builder.call(fn, [ival])
def long_from_ulong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromUnsignedLong",
self.long, signed=False)
def long_from_ssize_t(self, ival):
return self._long_from_native_int(ival, "PyLong_FromSsize_t",
self.py_ssize_t, signed=True)
def long_from_longlong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromLongLong",
self.longlong, signed=True)
def long_from_ulonglong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromUnsignedLongLong",
self.ulonglong, signed=False)
def long_from_signed_int(self, ival):
"""
Return a Python integer from any native integer value.
"""
bits = ival.type.width
if bits <= self.long.width:
return self.long_from_long(self.builder.sext(ival, self.long))
elif bits <= self.longlong.width:
return self.long_from_longlong(self.builder.sext(ival, self.longlong))
else:
raise OverflowError("integer too big (%d bits)" % (bits))
def long_from_unsigned_int(self, ival):
"""
Same as long_from_signed_int, but for unsigned values.
"""
bits = ival.type.width
if bits <= self.ulong.width:
return self.long_from_ulong(self.builder.zext(ival, self.ulong))
elif bits <= self.ulonglong.width:
return self.long_from_ulonglong(self.builder.zext(ival, self.ulonglong))
else:
raise OverflowError("integer too big (%d bits)" % (bits))
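# Worked sketch (exposition only; `pyapi`, `ival`, `uval` are assumed names):
# on an LP64 platform self.long is i64, so a 32-bit value is widened first:
#
#   py_int = pyapi.long_from_signed_int(ival)     # sext i32 -> i64, then
#                                                 # PyLong_FromLong
#   py_uint = pyapi.long_from_unsigned_int(uval)  # zext, PyLong_FromUnsignedLong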
def _get_number_operator(self, name):
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyNumber_%s" % name)
return fn
def _call_number_operator(self, name, lhs, rhs, inplace=False):
if inplace:
name = "InPlace" + name
fn = self._get_number_operator(name)
return self.builder.call(fn, [lhs, rhs])
def number_add(self, lhs, rhs, inplace=False):
return self._call_number_operator("Add", lhs, rhs, inplace=inplace)
def number_subtract(self, lhs, rhs, inplace=False):
return self._call_number_operator("Subtract", lhs, rhs, inplace=inplace)
def number_multiply(self, lhs, rhs, inplace=False):
return self._call_number_operator("Multiply", lhs, rhs, inplace=inplace)
def number_truedivide(self, lhs, rhs, inplace=False):
return self._call_number_operator("TrueDivide", lhs, rhs, inplace=inplace)
def number_floordivide(self, lhs, rhs, inplace=False):
return self._call_number_operator("FloorDivide", lhs, rhs, inplace=inplace)
def number_remainder(self, lhs, rhs, inplace=False):
return self._call_number_operator("Remainder", lhs, rhs, inplace=inplace)
def number_matrix_multiply(self, lhs, rhs, inplace=False):
return self._call_number_operator("MatrixMultiply", lhs, rhs, inplace=inplace)
def number_lshift(self, lhs, rhs, inplace=False):
return self._call_number_operator("Lshift", lhs, rhs, inplace=inplace)
def number_rshift(self, lhs, rhs, inplace=False):
return self._call_number_operator("Rshift", lhs, rhs, inplace=inplace)
def number_and(self, lhs, rhs, inplace=False):
return self._call_number_operator("And", lhs, rhs, inplace=inplace)
def number_or(self, lhs, rhs, inplace=False):
return self._call_number_operator("Or", lhs, rhs, inplace=inplace)
def number_xor(self, lhs, rhs, inplace=False):
return self._call_number_operator("Xor", lhs, rhs, inplace=inplace)
def number_power(self, lhs, rhs, inplace=False):
fnty = Type.function(self.pyobj, [self.pyobj] * 3)
fname = "PyNumber_InPlacePower" if inplace else "PyNumber_Power"
fn = self._get_function(fnty, fname)
return self.builder.call(fn, [lhs, rhs, self.borrow_none()])
def number_negative(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Negative")
return self.builder.call(fn, (obj,))
def number_positive(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Positive")
return self.builder.call(fn, (obj,))
def number_float(self, val):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Float")
return self.builder.call(fn, [val])
def number_invert(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Invert")
return self.builder.call(fn, (obj,))
def float_as_double(self, fobj):
fnty = Type.function(self.double, [self.pyobj])
fn = self._get_function(fnty, name="PyFloat_AsDouble")
return self.builder.call(fn, [fobj])
def bool_from_bool(self, bval):
"""
Get a Python bool from a LLVM boolean.
"""
longval = self.builder.zext(bval, self.long)
return self.bool_from_long(longval)
def bool_from_long(self, ival):
fnty = Type.function(self.pyobj, [self.long])
fn = self._get_function(fnty, name="PyBool_FromLong")
return self.builder.call(fn, [ival])
def complex_from_doubles(self, realval, imagval):
fnty = Type.function(self.pyobj, [Type.double(), Type.double()])
fn = self._get_function(fnty, name="PyComplex_FromDoubles")
return self.builder.call(fn, [realval, imagval])
def complex_real_as_double(self, cobj):
fnty = Type.function(Type.double(), [self.pyobj])
fn = self._get_function(fnty, name="PyComplex_RealAsDouble")
return self.builder.call(fn, [cobj])
def complex_imag_as_double(self, cobj):
fnty = Type.function(Type.double(), [self.pyobj])
fn = self._get_function(fnty, name="PyComplex_ImagAsDouble")
return self.builder.call(fn, [cobj])
#
# Concrete slice API
#
def slice_as_ints(self, obj):
"""
Read the members of a slice of integers.
Returns an (ok, start, stop, step) tuple where ok is a boolean and
the remaining members are pointer-sized ints.
"""
pstart = cgutils.alloca_once(self.builder, self.py_ssize_t)
pstop = cgutils.alloca_once(self.builder, self.py_ssize_t)
pstep = cgutils.alloca_once(self.builder, self.py_ssize_t)
fnty = Type.function(Type.int(),
[self.pyobj] + [self.py_ssize_t.as_pointer()] * 3)
fn = self._get_function(fnty, name="numba_unpack_slice")
res = self.builder.call(fn, (obj, pstart, pstop, pstep))
start = self.builder.load(pstart)
stop = self.builder.load(pstop)
step = self.builder.load(pstep)
return cgutils.is_null(self.builder, res), start, stop, step
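# Illustrative sketch (assumed names, exposition only): guard on the error
# flag before using the unpacked members:
#
#   ok, start, stop, step = pyapi.slice_as_ints(sliceobj)
#   with cgutils.if_likely(pyapi.builder, ok):
#       ...  # start/stop/step are py_ssize_t values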
#
# List and sequence APIs
#
def sequence_getslice(self, obj, start, stop):
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t,
self.py_ssize_t])
fn = self._get_function(fnty, name="PySequence_GetSlice")
return self.builder.call(fn, (obj, start, stop))
def sequence_tuple(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PySequence_Tuple")
return self.builder.call(fn, [obj])
def list_new(self, szval):
fnty = Type.function(self.pyobj, [self.py_ssize_t])
fn = self._get_function(fnty, name="PyList_New")
return self.builder.call(fn, [szval])
def list_size(self, lst):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PyList_Size")
return self.builder.call(fn, [lst])
def list_append(self, lst, val):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyList_Append")
return self.builder.call(fn, [lst, val])
def list_setitem(self, lst, idx, val):
"""
Warning: Steals reference to ``val``
"""
fnty = Type.function(Type.int(), [self.pyobj, self.py_ssize_t,
self.pyobj])
fn = self._get_function(fnty, name="PyList_SetItem")
return self.builder.call(fn, [lst, idx, val])
def list_getitem(self, lst, idx):
"""
Returns a borrowed reference.
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t])
fn = self._get_function(fnty, name="PyList_GetItem")
if isinstance(idx, int):
idx = self.context.get_constant(types.intp, idx)
return self.builder.call(fn, [lst, idx])
def list_setslice(self, lst, start, stop, obj):
if obj is None:
obj = self.get_null_object()
fnty = Type.function(Type.int(), [self.pyobj, self.py_ssize_t,
self.py_ssize_t, self.pyobj])
fn = self._get_function(fnty, name="PyList_SetSlice")
return self.builder.call(fn, (lst, start, stop, obj))
#
# Concrete tuple API
#
def tuple_getitem(self, tup, idx):
"""
Returns a borrowed reference.
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t])
fn = self._get_function(fnty, name="PyTuple_GetItem")
idx = self.context.get_constant(types.intp, idx)
return self.builder.call(fn, [tup, idx])
def tuple_pack(self, items):
fnty = Type.function(self.pyobj, [self.py_ssize_t], var_arg=True)
fn = self._get_function(fnty, name="PyTuple_Pack")
n = self.context.get_constant(types.intp, len(items))
args = [n]
args.extend(items)
return self.builder.call(fn, args)
def tuple_size(self, tup):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PyTuple_Size")
return self.builder.call(fn, [tup])
def tuple_new(self, count):
fnty = Type.function(self.pyobj, [Type.int()])
fn = self._get_function(fnty, name='PyTuple_New')
return self.builder.call(fn, [self.context.get_constant(types.int32,
count)])
def tuple_setitem(self, tuple_val, index, item):
"""
Steals a reference to `item`.
"""
fnty = Type.function(Type.int(), [self.pyobj, Type.int(), self.pyobj])
setitem_fn = self._get_function(fnty, name='PyTuple_SetItem')
index = self.context.get_constant(types.int32, index)
self.builder.call(setitem_fn, [tuple_val, index, item])
#
# Concrete set API
#
def set_new(self, iterable=None):
if iterable is None:
iterable = self.get_null_object()
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PySet_New")
return self.builder.call(fn, [iterable])
def set_add(self, set, value):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PySet_Add")
return self.builder.call(fn, [set, value])
def set_clear(self, set):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PySet_Clear")
return self.builder.call(fn, [set])
def set_size(self, set):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PySet_Size")
return self.builder.call(fn, [set])
def set_update(self, set, iterable):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="_PySet_Update")
return self.builder.call(fn, [set, iterable])
def set_next_entry(self, set, posptr, keyptr, hashptr):
fnty = Type.function(Type.int(),
[self.pyobj, self.py_ssize_t.as_pointer(),
self.pyobj.as_pointer(), self.py_hash_t.as_pointer()])
fn = self._get_function(fnty, name="_PySet_NextEntry")
return self.builder.call(fn, (set, posptr, keyptr, hashptr))
@contextlib.contextmanager
def set_iterate(self, set):
builder = self.builder
hashptr = cgutils.alloca_once(builder, self.py_hash_t, name="hashptr")
keyptr = cgutils.alloca_once(builder, self.pyobj, name="keyptr")
posptr = cgutils.alloca_once_value(builder,
ir.Constant(self.py_ssize_t, 0),
name="posptr")
bb_body = builder.append_basic_block("bb_body")
bb_end = builder.append_basic_block("bb_end")
builder.branch(bb_body)
def do_break():
builder.branch(bb_end)
with builder.goto_block(bb_body):
r = self.set_next_entry(set, posptr, keyptr, hashptr)
finished = cgutils.is_null(builder, r)
with builder.if_then(finished, likely=False):
builder.branch(bb_end)
yield _IteratorLoop(builder.load(keyptr), do_break)
builder.branch(bb_body)
builder.position_at_end(bb_end)
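# Illustrative sketch (assumed names, exposition only): iterating a set;
# `loop.value` is a borrowed reference to the current key and
# loop.do_break() exits early:
#
#   with pyapi.set_iterate(setobj) as loop:
#       pyapi.incref(loop.value)            # keep the key beyond the loop
#       pyapi.list_append(lst, loop.value)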
#
# GIL APIs
#
def gil_ensure(self):
"""
Ensure the GIL is acquired.
The returned value must be consumed by gil_release().
"""
gilptrty = Type.pointer(self.gil_state)
fnty = Type.function(Type.void(), [gilptrty])
fn = self._get_function(fnty, "numba_gil_ensure")
gilptr = cgutils.alloca_once(self.builder, self.gil_state)
self.builder.call(fn, [gilptr])
return gilptr
def gil_release(self, gil):
"""
Release the GIL previously acquired by gil_ensure().
Must be paired with a matching gil_ensure() call.
"""
gilptrty = Type.pointer(self.gil_state)
fnty = Type.function(Type.void(), [gilptrty])
fn = self._get_function(fnty, "numba_gil_release")
return self.builder.call(fn, [gil])
def save_thread(self):
"""
Release the GIL and return the former thread state
(an opaque non-NULL pointer).
"""
fnty = Type.function(self.voidptr, [])
fn = self._get_function(fnty, name="PyEval_SaveThread")
return self.builder.call(fn, [])
def restore_thread(self, thread_state):
"""
Restore the given thread state by reacquiring the GIL.
"""
fnty = Type.function(Type.void(), [self.voidptr])
fn = self._get_function(fnty, name="PyEval_RestoreThread")
self.builder.call(fn, [thread_state])
#
# Generic object private data (a way of associating an arbitrary void*
# pointer with an arbitrary Python object).
#
def object_get_private_data(self, obj):
fnty = Type.function(self.voidptr, [self.pyobj])
fn = self._get_function(fnty, name="numba_get_pyobject_private_data")
return self.builder.call(fn, (obj,))
def object_set_private_data(self, obj, ptr):
fnty = Type.function(Type.void(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="numba_set_pyobject_private_data")
return self.builder.call(fn, (obj, ptr))
def object_reset_private_data(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="numba_reset_pyobject_private_data")
return self.builder.call(fn, (obj,))
#
# Other APIs (organize them better!)
#
def import_module_noblock(self, modname):
fnty = Type.function(self.pyobj, [self.cstring])
fn = self._get_function(fnty, name="PyImport_ImportModuleNoBlock")
return self.builder.call(fn, [modname])
def call_function_objargs(self, callee, objargs):
fnty = Type.function(self.pyobj, [self.pyobj], var_arg=True)
fn = self._get_function(fnty, name="PyObject_CallFunctionObjArgs")
args = [callee] + list(objargs)
args.append(self.context.get_constant_null(types.pyobject))
return self.builder.call(fn, args)
def call_method(self, callee, method, objargs=()):
cname = self.context.insert_const_string(self.module, method)
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring, self.cstring],
var_arg=True)
fn = self._get_function(fnty, name="PyObject_CallMethod")
fmt = 'O' * len(objargs)
cfmt = self.context.insert_const_string(self.module, fmt)
args = [callee, cname, cfmt]
if objargs:
args.extend(objargs)
args.append(self.context.get_constant_null(types.pyobject))
return self.builder.call(fn, args)
def call(self, callee, args=None, kws=None):
if args is None:
args = self.get_null_object()
if kws is None:
kws = self.get_null_object()
fnty = Type.function(self.pyobj, [self.pyobj] * 3)
fn = self._get_function(fnty, name="PyObject_Call")
return self.builder.call(fn, (callee, args, kws))
def object_istrue(self, obj):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PyObject_IsTrue")
return self.builder.call(fn, [obj])
def object_not(self, obj):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PyObject_Not")
return self.builder.call(fn, [obj])
def object_richcompare(self, lhs, rhs, opstr):
"""
Refer to the Python source, Include/object.h, for the macro
definitions of the opid values.
"""
ops = ['<', '<=', '==', '!=', '>', '>=']
if opstr in ops:
opid = ops.index(opstr)
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj, Type.int()])
fn = self._get_function(fnty, name="PyObject_RichCompare")
lopid = self.context.get_constant(types.int32, opid)
return self.builder.call(fn, (lhs, rhs, lopid))
elif opstr == 'is':
bitflag = self.builder.icmp(lc.ICMP_EQ, lhs, rhs)
return self.bool_from_bool(bitflag)
elif opstr == 'is not':
bitflag = self.builder.icmp(lc.ICMP_NE, lhs, rhs)
return self.bool_from_bool(bitflag)
elif opstr in ('in', 'not in'):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PySequence_Contains")
status = self.builder.call(fn, (rhs, lhs))
negone = self.context.get_constant(types.int32, -1)
is_good = self.builder.icmp(lc.ICMP_NE, status, negone)
# Stack allocate output and initialize to Null
outptr = cgutils.alloca_once_value(self.builder,
Constant.null(self.pyobj))
# If PySequence_Contains returns non-error value
with cgutils.if_likely(self.builder, is_good):
if opstr == 'not in':
status = self.builder.not_(status)
# Store the status as a boolean object
truncated = self.builder.trunc(status, Type.int(1))
self.builder.store(self.bool_from_bool(truncated),
outptr)
return self.builder.load(outptr)
else:
raise NotImplementedError("Unknown operator {op!r}".format(
op=opstr))
def iter_next(self, iterobj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyIter_Next")
return self.builder.call(fn, [iterobj])
def object_getiter(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetIter")
return self.builder.call(fn, [obj])
def object_getattr_string(self, obj, attr):
cstr = self.context.insert_const_string(self.module, attr)
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyObject_GetAttrString")
return self.builder.call(fn, [obj, cstr])
def object_getattr(self, obj, attr):
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetAttr")
return self.builder.call(fn, [obj, attr])
def object_setattr_string(self, obj, attr, val):
cstr = self.context.insert_const_string(self.module, attr)
fnty = Type.function(Type.int(), [self.pyobj, self.cstring, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetAttrString")
return self.builder.call(fn, [obj, cstr, val])
def object_setattr(self, obj, attr, val):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetAttr")
return self.builder.call(fn, [obj, attr, val])
def object_delattr_string(self, obj, attr):
# PyObject_DelAttrString() is actually a C macro calling
# PyObject_SetAttrString() with value == NULL.
return self.object_setattr_string(obj, attr, self.get_null_object())
def object_delattr(self, obj, attr):
# PyObject_DelAttr() is actually a C macro calling
# PyObject_SetAttr() with value == NULL.
return self.object_setattr(obj, attr, self.get_null_object())
def object_getitem(self, obj, key):
"""
Return obj[key]
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetItem")
return self.builder.call(fn, (obj, key))
def object_setitem(self, obj, key, val):
"""
obj[key] = val
"""
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetItem")
return self.builder.call(fn, (obj, key, val))
def object_delitem(self, obj, key):
"""
del obj[key]
"""
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_DelItem")
return self.builder.call(fn, (obj, key))
def string_as_string(self, strobj):
fnty = Type.function(self.cstring, [self.pyobj])
fname = "PyUnicode_AsUTF8"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [strobj])
def string_as_string_and_size(self, strobj):
"""
Returns a tuple of ``(ok, buffer, length)``.
``ok`` is an i1 value that is set if the conversion succeeded.
``buffer`` is an i8* pointing to the output buffer.
``length`` is an i32/i64 (py_ssize_t) holding the length of the buffer.
"""
p_length = cgutils.alloca_once(self.builder, self.py_ssize_t)
fnty = Type.function(self.cstring, [self.pyobj,
self.py_ssize_t.as_pointer()])
fname = "PyUnicode_AsUTF8AndSize"
fn = self._get_function(fnty, name=fname)
buffer = self.builder.call(fn, [strobj, p_length])
ok = self.builder.icmp_unsigned('!=',
ir.Constant(buffer.type, None),
buffer)
return (ok, buffer, self.builder.load(p_length))
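# Usage sketch (assumed, illustrative names): callers should branch on `ok`
# before dereferencing the buffer:
#
#     ok, buf, length = pyapi.string_as_string_and_size(strobj)
#     with cgutils.if_likely(pyapi.builder, ok):
#         ...  # read `length` bytes from `buf`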
# MASKED: string_as_string_size_and_kind function (lines 1095-1123)
def string_from_string_and_size(self, string, size):
fnty = Type.function(self.pyobj, [self.cstring, self.py_ssize_t])
fname = "PyString_FromStringAndSize"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string, size])
def string_from_string(self, string):
fnty = Type.function(self.pyobj, [self.cstring])
fname = "PyUnicode_FromString"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string])
def string_from_kind_and_data(self, kind, string, size):
fnty = Type.function(self.pyobj, [Type.int(), self.cstring, self.py_ssize_t])
fname = "PyUnicode_FromKindAndData"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [kind, string, size])
def bytes_from_string_and_size(self, string, size):
fnty = Type.function(self.pyobj, [self.cstring, self.py_ssize_t])
fname = "PyBytes_FromStringAndSize"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string, size])
def object_hash(self, obj):
fnty = Type.function(self.py_hash_t, [self.pyobj,])
fname = "PyObject_Hash"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [obj,])
def object_str(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyObject_Str")
return self.builder.call(fn, [obj])
def make_none(self):
obj = self.borrow_none()
self.incref(obj)
return obj
def borrow_none(self):
return self.get_c_object("_Py_NoneStruct")
def sys_write_stdout(self, fmt, *args):
fnty = Type.function(Type.void(), [self.cstring], var_arg=True)
fn = self._get_function(fnty, name="PySys_FormatStdout")
return self.builder.call(fn, (fmt,) + args)
def object_dump(self, obj):
"""
Dump a Python object on C stderr. For debugging purposes.
"""
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="_PyObject_Dump")
return self.builder.call(fn, (obj,))
#
# NRT (Numba runtime) APIs
#
def nrt_adapt_ndarray_to_python(self, aryty, ary, dtypeptr):
assert self.context.enable_nrt, "NRT required"
intty = ir.IntType(32)
fnty = Type.function(self.pyobj,
[self.voidptr, intty, intty, self.pyobj])
fn = self._get_function(fnty, name="NRT_adapt_ndarray_to_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
ndim = self.context.get_constant(types.int32, aryty.ndim)
writable = self.context.get_constant(types.int32, int(aryty.mutable))
aryptr = cgutils.alloca_once_value(self.builder, ary)
return self.builder.call(fn, [self.builder.bitcast(aryptr,
self.voidptr),
ndim, writable, dtypeptr])
def nrt_meminfo_new_from_pyobject(self, data, pyobj):
"""
Allocate a new MemInfo with a data payload borrowed from a Python
object.
"""
mod = self.builder.module
fnty = ir.FunctionType(
cgutils.voidptr_t,
[cgutils.voidptr_t, cgutils.voidptr_t],
)
fn = mod.get_or_insert_function(
fnty,
name="NRT_meminfo_new_from_pyobject",
)
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [data, pyobj])
def nrt_meminfo_as_pyobject(self, miptr):
mod = self.builder.module
fnty = ir.FunctionType(
self.pyobj,
[cgutils.voidptr_t]
)
fn = mod.get_or_insert_function(
fnty,
name='NRT_meminfo_as_pyobject',
)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [miptr])
def nrt_meminfo_from_pyobject(self, miobj):
mod = self.builder.module
fnty = ir.FunctionType(
cgutils.voidptr_t,
[self.pyobj]
)
fn = mod.get_or_insert_function(
fnty,
name='NRT_meminfo_from_pyobject',
)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [miobj])
def nrt_adapt_ndarray_from_python(self, ary, ptr):
assert self.context.enable_nrt
fnty = Type.function(Type.int(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="NRT_adapt_ndarray_from_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (ary, ptr))
def nrt_adapt_buffer_from_python(self, buf, ptr):
assert self.context.enable_nrt
fnty = Type.function(Type.void(), [Type.pointer(self.py_buffer_t),
self.voidptr])
fn = self._get_function(fnty, name="NRT_adapt_buffer_from_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (buf, ptr))
# ------ utils -----
def _get_function(self, fnty, name):
return self.module.get_or_insert_function(fnty, name=name)
def alloca_obj(self):
return self.builder.alloca(self.pyobj)
def alloca_buffer(self):
"""
Return a pointer to a stack-allocated, zero-initialized Py_buffer.
"""
# Treat the buffer as an opaque array of bytes
ptr = cgutils.alloca_once_value(self.builder,
lc.Constant.null(self.py_buffer_t))
return ptr
@contextlib.contextmanager
def if_object_ok(self, obj):
with cgutils.if_likely(self.builder,
cgutils.is_not_null(self.builder, obj)):
yield
def print_object(self, obj):
strobj = self.object_str(obj)
cstr = self.string_as_string(strobj)
fmt = self.context.insert_const_string(self.module, "%s")
self.sys_write_stdout(fmt, cstr)
self.decref(strobj)
def print_string(self, text):
fmt = self.context.insert_const_string(self.module, text)
self.sys_write_stdout(fmt)
def get_null_object(self):
return Constant.null(self.pyobj)
def return_none(self):
none = self.make_none()
self.builder.ret(none)
def list_pack(self, items):
n = len(items)
seq = self.list_new(self.context.get_constant(types.intp, n))
with self.if_object_ok(seq):
for i in range(n):
idx = self.context.get_constant(types.intp, i)
self.incref(items[i])
self.list_setitem(seq, idx, items[i])
return seq
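# Usage sketch (editorial): list_pack increfs each item because
# PyList_SetItem steals a reference; the result may be NULL on failure:
#
#     lst = pyapi.list_pack([obj_a, obj_b])   # obj_a/obj_b: PyObject* values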
def unserialize(self, structptr):
"""
Unserialize some data. *structptr* should be a pointer to
a {i8* data, i32 length, i8* hashbuf} structure.
"""
fnty = Type.function(self.pyobj,
(self.voidptr, ir.IntType(32), self.voidptr))
fn = self._get_function(fnty, name="numba_unpickle")
ptr = self.builder.extract_value(self.builder.load(structptr), 0)
n = self.builder.extract_value(self.builder.load(structptr), 1)
hashed = self.builder.extract_value(self.builder.load(structptr), 2)
return self.builder.call(fn, (ptr, n, hashed))
def serialize_uncached(self, obj):
"""
Same as serialize_object(), but don't create a global variable,
simply return a literal {i8* data, i32 length, i8* hashbuf} structure.
"""
# First make the array constant
data = serialize.dumps(obj)
assert len(data) < 2**31
name = ".const.pickledata.%s" % (id(obj) if config.DIFF_IR == 0 else "DIFF_IR")
bdata = cgutils.make_bytearray(data)
# Make SHA1 hash on the pickled content
# NOTE: update buffer size in numba_unpickle() when changing the
# hash algorithm.
hashed = cgutils.make_bytearray(hashlib.sha1(data).digest())
arr = self.context.insert_unique_const(self.module, name, bdata)
hasharr = self.context.insert_unique_const(
self.module, f"{name}.sha1", hashed,
)
# Then populate the structure constant
struct = ir.Constant.literal_struct([
arr.bitcast(self.voidptr),
ir.Constant(ir.IntType(32), arr.type.pointee.count),
hasharr.bitcast(self.voidptr),
])
return struct
def serialize_object(self, obj):
"""
Serialize the given object in the bitcode, and return it
as a pointer to a {i8* data, i32 length, i8* hashbuf} structure
constant (suitable for passing to unserialize()).
"""
try:
gv = self.module.__serialized[obj]
except KeyError:
struct = self.serialize_uncached(obj)
name = ".const.picklebuf.%s" % (id(obj) if config.DIFF_IR == 0 else "DIFF_IR")
gv = self.context.insert_unique_const(self.module, name, struct)
# Make the id() (and hence the name) unique while populating the module.
self.module.__serialized[obj] = gv
return gv
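# Round-trip sketch (editorial, hypothetical constant): serialize_object
# embeds the pickled object in the module once; unserialize recovers it at
# runtime as a new reference (NULL on failure):
#
#     gv = pyapi.serialize_object(("some", "constant"))
#     obj = pyapi.unserialize(gv)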
def c_api_error(self):
return cgutils.is_not_null(self.builder, self.err_occurred())
def to_native_value(self, typ, obj):
"""
Unbox the Python object as the given Numba type.
A NativeValue instance is returned.
"""
from numba.core.boxing import unbox_unsupported
impl = _unboxers.lookup(typ.__class__, unbox_unsupported)
c = _UnboxContext(self.context, self.builder, self)
return impl(typ, obj, c)
def from_native_return(self, typ, val, env_manager):
assert not isinstance(typ, types.Optional), "callconv should have " \
"prevented the return of " \
"optional value"
out = self.from_native_value(typ, val, env_manager)
return out
def from_native_value(self, typ, val, env_manager=None):
"""
Box the native value of the given Numba type. A Python object
pointer is returned (NULL if an error occurred).
This method steals any native (NRT) reference embedded in *val*.
"""
from numba.core.boxing import box_unsupported
impl = _boxers.lookup(typ.__class__, box_unsupported)
c = _BoxContext(self.context, self.builder, self, env_manager)
return impl(typ, val, c)
def reflect_native_value(self, typ, val, env_manager=None):
"""
Reflect the native value onto its Python original, if any.
An error bit (as an LLVM value) is returned.
"""
impl = _reflectors.lookup(typ.__class__)
if impl is None:
# Reflection isn't needed for most types
return cgutils.false_bit
is_error = cgutils.alloca_once_value(self.builder, cgutils.false_bit)
c = _ReflectContext(self.context, self.builder, self, env_manager,
is_error)
impl(typ, val, c)
return self.builder.load(c.is_error)
def to_native_generator(self, obj, typ):
"""
Extract the generator structure pointer from a generator *obj*
(a _dynfunc.Generator instance).
"""
gen_ptr_ty = Type.pointer(self.context.get_data_type(typ))
value = self.context.get_generator_state(self.builder, obj, gen_ptr_ty)
return NativeValue(value)
def from_native_generator(self, val, typ, env=None):
"""
Make a Numba generator (a _dynfunc.Generator instance) from a
generator structure pointer *val*.
*env* is an optional _dynfunc.Environment instance to be wrapped
in the generator.
"""
llty = self.context.get_data_type(typ)
assert not llty.is_pointer
gen_struct_size = self.context.get_abi_sizeof(llty)
gendesc = self.context.get_generator_desc(typ)
# This is the PyCFunctionWithKeywords generated by PyCallWrapper
genfnty = Type.function(self.pyobj, [self.pyobj, self.pyobj, self.pyobj])
genfn = self._get_function(genfnty, name=gendesc.llvm_cpython_wrapper_name)
# This is the raw finalizer generated by _lower_generator_finalize_func()
finalizerty = Type.function(Type.void(), [self.voidptr])
if typ.has_finalizer:
finalizer = self._get_function(finalizerty, name=gendesc.llvm_finalizer_name)
else:
finalizer = Constant.null(Type.pointer(finalizerty))
# PyObject *numba_make_generator(state_size, initial_state, nextfunc, finalizer, env)
fnty = Type.function(self.pyobj, [self.py_ssize_t,
self.voidptr,
Type.pointer(genfnty),
Type.pointer(finalizerty),
self.voidptr])
fn = self._get_function(fnty, name="numba_make_generator")
state_size = ir.Constant(self.py_ssize_t, gen_struct_size)
initial_state = self.builder.bitcast(val, self.voidptr)
if env is None:
env = self.get_null_object()
env = self.builder.bitcast(env, self.voidptr)
return self.builder.call(fn,
(state_size, initial_state, genfn, finalizer, env))
def numba_array_adaptor(self, ary, ptr):
assert not self.context.enable_nrt
fnty = Type.function(Type.int(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="numba_adapt_ndarray")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (ary, ptr))
def numba_buffer_adaptor(self, buf, ptr):
fnty = Type.function(Type.void(),
[ir.PointerType(self.py_buffer_t), self.voidptr])
fn = self._get_function(fnty, name="numba_adapt_buffer")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (buf, ptr))
def complex_adaptor(self, cobj, cmplx):
fnty = Type.function(Type.int(), [self.pyobj, cmplx.type])
fn = self._get_function(fnty, name="numba_complex_adaptor")
return self.builder.call(fn, [cobj, cmplx])
def extract_record_data(self, obj, pbuf):
fnty = Type.function(self.voidptr,
[self.pyobj, ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_extract_record_data")
return self.builder.call(fn, [obj, pbuf])
def get_buffer(self, obj, pbuf):
fnty = Type.function(Type.int(),
[self.pyobj, ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_get_buffer")
return self.builder.call(fn, [obj, pbuf])
def release_buffer(self, pbuf):
fnty = Type.function(Type.void(), [ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_release_buffer")
return self.builder.call(fn, [pbuf])
def extract_np_datetime(self, obj):
fnty = Type.function(Type.int(64), [self.pyobj])
fn = self._get_function(fnty, name="numba_extract_np_datetime")
return self.builder.call(fn, [obj])
def extract_np_timedelta(self, obj):
fnty = Type.function(Type.int(64), [self.pyobj])
fn = self._get_function(fnty, name="numba_extract_np_timedelta")
return self.builder.call(fn, [obj])
def create_np_datetime(self, val, unit_code):
unit_code = Constant.int(Type.int(), unit_code)
fnty = Type.function(self.pyobj, [Type.int(64), Type.int()])
fn = self._get_function(fnty, name="numba_create_np_datetime")
return self.builder.call(fn, [val, unit_code])
def create_np_timedelta(self, val, unit_code):
unit_code = Constant.int(Type.int(), unit_code)
fnty = Type.function(self.pyobj, [Type.int(64), Type.int()])
fn = self._get_function(fnty, name="numba_create_np_timedelta")
return self.builder.call(fn, [val, unit_code])
def recreate_record(self, pdata, size, dtype, env_manager):
fnty = Type.function(self.pyobj, [Type.pointer(Type.int(8)),
Type.int(), self.pyobj])
fn = self._get_function(fnty, name="numba_recreate_record")
dtypeaddr = env_manager.read_const(env_manager.add_const(dtype))
return self.builder.call(fn, [pdata, size, dtypeaddr])
def string_from_constant_string(self, string):
cstr = self.context.insert_const_string(self.module, string)
sz = self.context.get_constant(types.intp, len(string))
return self.string_from_string_and_size(cstr, sz)
def call_jit_code(self, func, sig, args):
"""Calls into Numba jitted code and propagate error using the Python
calling convention.
Parameters
----------
func : function
The Python function to be compiled. This function is compiled
in nopython-mode.
sig : numba.typing.Signature
The function signature for *func*.
args : Sequence[llvmlite.binding.Value]
LLVM values to use as arguments.
Returns
-------
(is_error, res) : 2-tuple of llvmlite.binding.Value.
is_error : true iff *func* raised an exception.
res : Returned value from *func* iff *is_error* is false.
If *is_error* is true, this method will adapt the nopython exception
into a Python exception. Caller should return NULL to Python to
indicate an error.
"""
# Compile *func*
builder = self.builder
cres = self.context.compile_subroutine(builder, func, sig)
got_retty = cres.signature.return_type
retty = sig.return_type
if got_retty != retty:
# This error indicates an error in *func* or the caller of this
# method.
raise errors.LoweringError(
f'mismatching signature {got_retty} != {retty}.\n'
)
# Call into *func*
status, res = self.context.call_internal_no_propagate(
builder, cres.fndesc, sig, args,
)
# Post-call handling for *func*
is_error_ptr = cgutils.alloca_once(builder, cgutils.bool_t, zfill=True)
res_type = self.context.get_value_type(sig.return_type)
res_ptr = cgutils.alloca_once(builder, res_type, zfill=True)
# Handle error and adapt the nopython exception into cpython exception
with builder.if_else(status.is_error) as (has_err, no_err):
with has_err:
builder.store(status.is_error, is_error_ptr)
# Set error state in the Python interpreter
self.context.call_conv.raise_error(builder, self, status)
with no_err:
# Handle returned value
res = imputils.fix_returning_optional(
self.context, builder, sig, status, res,
)
builder.store(res, res_ptr)
is_error = builder.load(is_error_ptr)
res = builder.load(res_ptr)
return is_error, res
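# Hypothetical usage sketch for call_jit_code (names are illustrative only):
#
#     sig = types.intp(types.intp)
#     is_error, res = pyapi.call_jit_code(py_func, sig, [llvm_arg])
#     with pyapi.builder.if_then(is_error):
#         pyapi.builder.ret(pyapi.get_null_object())  # propagate to CPython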
class ObjModeUtils:
"""Internal utils for calling objmode dispatcher from within NPM code.
"""
def __init__(self, pyapi):
self.pyapi = pyapi
def load_dispatcher(self, fnty, argtypes):
builder = self.pyapi.builder
tyctx = self.pyapi.context
m = builder.module
# Add a global variable to cache the objmode dispatcher
gv = ir.GlobalVariable(
m, self.pyapi.pyobj,
name=m.get_unique_name("cached_objmode_dispatcher"),
)
gv.initializer = gv.type.pointee(None)
gv.linkage = 'internal'
cached = builder.load(gv)
with builder.if_then(cgutils.is_null(builder, cached)):
if serialize.is_serialiable(fnty.dispatcher):
cls = type(self)
compiler = self.pyapi.unserialize(
self.pyapi.serialize_object(cls._call_objmode_dispatcher)
)
serialized_dispatcher = self.pyapi.serialize_object(
(fnty.dispatcher, tuple(argtypes)),
)
compile_args = self.pyapi.unserialize(serialized_dispatcher)
callee = self.pyapi.call_function_objargs(
compiler, [compile_args],
)
# Clean up
self.pyapi.decref(compiler)
self.pyapi.decref(compile_args)
else:
entry_pt = fnty.dispatcher.compile(tuple(argtypes))
callee = tyctx.add_dynamic_addr(
builder, id(entry_pt), info="with_objectmode",
)
# Incref the dispatcher and cache it
self.pyapi.incref(callee)
builder.store(callee, gv)
callee = builder.load(gv)
return callee
@staticmethod
def _call_objmode_dispatcher(compile_args):
dispatcher, argtypes = compile_args
entrypt = dispatcher.compile(argtypes)
return entrypt
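# Design note (editorial): load_dispatcher caches the objmode dispatcher in
# an internal module-level global, so the unpickle-and-compile path above
# runs only on the first call; later calls simply reload the cached
# PyObject* from that global.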
|
def string_as_string_size_and_kind(self, strobj):
"""
Returns a tuple of ``(ok, buffer, length, kind, is_ascii, hash)``.
``ok`` is an i1 value that is set if the conversion succeeded.
``buffer`` is an i8* pointing to the output buffer.
``length`` is an i32/i64 (py_ssize_t) holding the length of the buffer.
``kind`` is an i32 (int32) holding the Unicode kind constant.
``is_ascii`` is an i32 (int32) flag set if the string is ASCII-only.
``hash`` is a py_ssize_t (py_hash_t) holding the string's cached hash.
"""
p_length = cgutils.alloca_once(self.builder, self.py_ssize_t)
p_kind = cgutils.alloca_once(self.builder, Type.int())
p_ascii = cgutils.alloca_once(self.builder, Type.int())
p_hash = cgutils.alloca_once(self.builder, self.py_hash_t)
fnty = Type.function(self.cstring, [self.pyobj,
self.py_ssize_t.as_pointer(),
Type.int().as_pointer(),
Type.int().as_pointer(),
self.py_hash_t.as_pointer()])
fname = "numba_extract_unicode"
fn = self._get_function(fnty, name=fname)
buffer = self.builder.call(
fn, [strobj, p_length, p_kind, p_ascii, p_hash])
ok = self.builder.icmp_unsigned('!=',
ir.Constant(buffer.type, None),
buffer)
return (ok, buffer, self.builder.load(p_length),
self.builder.load(p_kind), self.builder.load(p_ascii),
self.builder.load(p_hash))
| 1095
| 1123
|
from collections import namedtuple
import contextlib
import pickle
import hashlib
from llvmlite import ir
from llvmlite.llvmpy.core import Type, Constant
import llvmlite.llvmpy.core as lc
import ctypes
from numba import _helperlib
from numba.core import (
types, utils, config, lowering, cgutils, imputils, serialize,
)
PY_UNICODE_1BYTE_KIND = _helperlib.py_unicode_1byte_kind
PY_UNICODE_2BYTE_KIND = _helperlib.py_unicode_2byte_kind
PY_UNICODE_4BYTE_KIND = _helperlib.py_unicode_4byte_kind
PY_UNICODE_WCHAR_KIND = _helperlib.py_unicode_wchar_kind
class _Registry(object):
def __init__(self):
self.functions = {}
def register(self, typeclass):
assert issubclass(typeclass, types.Type)
def decorator(func):
if typeclass in self.functions:
raise KeyError("duplicate registration for %s" % (typeclass,))
self.functions[typeclass] = func
return func
return decorator
def lookup(self, typeclass, default=None):
assert issubclass(typeclass, types.Type)
for cls in typeclass.__mro__:
func = self.functions.get(cls)
if func is not None:
return func
return default
# Registries of boxing / unboxing implementations
_boxers = _Registry()
_unboxers = _Registry()
_reflectors = _Registry()
box = _boxers.register
unbox = _unboxers.register
reflect = _reflectors.register
class _BoxContext(namedtuple("_BoxContext",
("context", "builder", "pyapi", "env_manager"))):
"""
The facilities required by boxing implementations.
"""
__slots__ = ()
def box(self, typ, val):
return self.pyapi.from_native_value(typ, val, self.env_manager)
class _UnboxContext(namedtuple("_UnboxContext",
("context", "builder", "pyapi"))):
"""
The facilities required by unboxing implementations.
"""
__slots__ = ()
def unbox(self, typ, obj):
return self.pyapi.to_native_value(typ, obj)
class _ReflectContext(namedtuple("_ReflectContext",
("context", "builder", "pyapi", "env_manager",
"is_error"))):
"""
The facilities required by reflection implementations.
"""
__slots__ = ()
# XXX the error bit is currently unused by consumers (e.g. PyCallWrapper)
def set_error(self):
self.builder.store(self.is_error, cgutils.true_bit)
def box(self, typ, val):
return self.pyapi.from_native_value(typ, val, self.env_manager)
def reflect(self, typ, val):
return self.pyapi.reflect_native_value(typ, val, self.env_manager)
class NativeValue(object):
"""
Encapsulate the result of converting a Python object to a native value,
recording whether the conversion was successful and how to cleanup.
"""
def __init__(self, value, is_error=None, cleanup=None):
self.value = value
self.is_error = is_error if is_error is not None else cgutils.false_bit
self.cleanup = cleanup
class EnvironmentManager(object):
def __init__(self, pyapi, env, env_body, env_ptr):
assert isinstance(env, lowering.Environment)
self.pyapi = pyapi
self.env = env
self.env_body = env_body
self.env_ptr = env_ptr
def add_const(self, const):
"""
Add a constant to the environment, return its index.
"""
# All constants are frozen inside the environment
if isinstance(const, str):
const = utils.intern(const)
for index, val in enumerate(self.env.consts):
if val is const:
break
else:
index = len(self.env.consts)
self.env.consts.append(const)
return index
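# Sketch of the constant round-trip (editorial; `env_manager` is an
# EnvironmentManager, the constant is illustrative):
#
#     idx = env_manager.add_const("some_attr")       # frozen into env.consts
#     obj = env_manager.read_const(idx)              # borrowed PyObject*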
def read_const(self, index):
"""
Look up constant number *index* inside the environment body.
A borrowed reference is returned.
The returned LLVM value may be NULL at runtime, which indicates
an error.
"""
assert index < len(self.env.consts)
builder = self.pyapi.builder
consts = self.env_body.consts
ret = cgutils.alloca_once(builder, self.pyapi.pyobj, zfill=True)
with builder.if_else(cgutils.is_not_null(builder, consts)) as \
(br_not_null, br_null):
with br_not_null:
getitem = self.pyapi.list_getitem(consts, index)
builder.store(getitem, ret)
with br_null:
# This can happen when the Environment is accidentally released
# and has subsequently been garbage collected.
self.pyapi.err_set_string(
"PyExc_RuntimeError",
"`env.consts` is NULL in `read_const`",
)
return builder.load(ret)
_IteratorLoop = namedtuple('_IteratorLoop', ('value', 'do_break'))
class PythonAPI(object):
"""
Code generation facilities to call into the CPython C API (and related
helpers).
"""
def __init__(self, context, builder):
"""
Note: may be called multiple times when lowering a function.
"""
from numba.core import boxing
self.context = context
self.builder = builder
self.module = builder.basic_block.function.module
# A unique mapping of serialized objects in this module
try:
self.module.__serialized
except AttributeError:
self.module.__serialized = {}
# Initialize types
self.pyobj = self.context.get_argument_type(types.pyobject)
self.pyobjptr = self.pyobj.as_pointer()
self.voidptr = Type.pointer(Type.int(8))
self.long = Type.int(ctypes.sizeof(ctypes.c_long) * 8)
self.ulong = self.long
self.longlong = Type.int(ctypes.sizeof(ctypes.c_ulonglong) * 8)
self.ulonglong = self.longlong
self.double = Type.double()
self.py_ssize_t = self.context.get_value_type(types.intp)
self.cstring = Type.pointer(Type.int(8))
self.gil_state = Type.int(_helperlib.py_gil_state_size * 8)
self.py_buffer_t = ir.ArrayType(ir.IntType(8), _helperlib.py_buffer_size)
self.py_hash_t = self.py_ssize_t
self.py_unicode_1byte_kind = _helperlib.py_unicode_1byte_kind
self.py_unicode_2byte_kind = _helperlib.py_unicode_2byte_kind
self.py_unicode_4byte_kind = _helperlib.py_unicode_4byte_kind
self.py_unicode_wchar_kind = _helperlib.py_unicode_wchar_kind
def get_env_manager(self, env, env_body, env_ptr):
return EnvironmentManager(self, env, env_body, env_ptr)
def emit_environment_sentry(self, envptr, return_pyobject=False,
debug_msg=''):
"""Emits LLVM code to ensure the `envptr` is not NULL
"""
is_null = cgutils.is_null(self.builder, envptr)
with cgutils.if_unlikely(self.builder, is_null):
if return_pyobject:
fnty = self.builder.function.type.pointee
assert fnty.return_type == self.pyobj
self.err_set_string(
"PyExc_RuntimeError", f"missing Environment: {debug_msg}",
)
self.builder.ret(self.get_null_object())
else:
self.context.call_conv.return_user_exc(
self.builder, RuntimeError,
(f"missing Environment: {debug_msg}",),
)
# ------ Python API -----
#
# Basic object API
#
def incref(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="Py_IncRef")
self.builder.call(fn, [obj])
def decref(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="Py_DecRef")
self.builder.call(fn, [obj])
def get_type(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="numba_py_type")
return self.builder.call(fn, [obj])
#
# Argument unpacking
#
def parse_tuple_and_keywords(self, args, kws, fmt, keywords, *objs):
charptr = Type.pointer(Type.int(8))
charptrary = Type.pointer(charptr)
argtypes = [self.pyobj, self.pyobj, charptr, charptrary]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_ParseTupleAndKeywords")
return self.builder.call(fn, [args, kws, fmt, keywords] + list(objs))
def parse_tuple(self, args, fmt, *objs):
charptr = Type.pointer(Type.int(8))
argtypes = [self.pyobj, charptr]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_ParseTuple")
return self.builder.call(fn, [args, fmt] + list(objs))
def unpack_tuple(self, args, name, n_min, n_max, *objs):
charptr = Type.pointer(Type.int(8))
argtypes = [self.pyobj, charptr, self.py_ssize_t, self.py_ssize_t]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_UnpackTuple")
n_min = Constant.int(self.py_ssize_t, n_min)
n_max = Constant.int(self.py_ssize_t, n_max)
if isinstance(name, str):
name = self.context.insert_const_string(self.builder.module, name)
return self.builder.call(fn, [args, name, n_min, n_max] + list(objs))
#
# Exception and errors
#
def err_occurred(self):
fnty = Type.function(self.pyobj, ())
fn = self._get_function(fnty, name="PyErr_Occurred")
return self.builder.call(fn, ())
def err_clear(self):
fnty = Type.function(Type.void(), ())
fn = self._get_function(fnty, name="PyErr_Clear")
return self.builder.call(fn, ())
def err_set_string(self, exctype, msg):
fnty = Type.function(Type.void(), [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyErr_SetString")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
if isinstance(msg, str):
msg = self.context.insert_const_string(self.module, msg)
return self.builder.call(fn, (exctype, msg))
def err_format(self, exctype, msg, *format_args):
fnty = Type.function(Type.void(), [self.pyobj, self.cstring], var_arg=True)
fn = self._get_function(fnty, name="PyErr_Format")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
if isinstance(msg, str):
msg = self.context.insert_const_string(self.module, msg)
return self.builder.call(fn, (exctype, msg) + tuple(format_args))
def raise_object(self, exc=None):
"""
Raise an arbitrary exception (a type, a value, a (type, args)
tuple, or None when re-raising). A reference to the argument is consumed.
"""
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="numba_do_raise")
if exc is None:
exc = self.make_none()
return self.builder.call(fn, (exc,))
def err_set_object(self, exctype, excval):
fnty = Type.function(Type.void(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyErr_SetObject")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
return self.builder.call(fn, (exctype, excval))
def err_set_none(self, exctype):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="PyErr_SetNone")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
return self.builder.call(fn, (exctype,))
def err_write_unraisable(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="PyErr_WriteUnraisable")
return self.builder.call(fn, (obj,))
def err_fetch(self, pty, pval, ptb):
fnty = Type.function(Type.void(), [self.pyobjptr] * 3)
fn = self._get_function(fnty, name="PyErr_Fetch")
return self.builder.call(fn, (pty, pval, ptb))
def err_restore(self, ty, val, tb):
fnty = Type.function(Type.void(), [self.pyobj] * 3)
fn = self._get_function(fnty, name="PyErr_Restore")
return self.builder.call(fn, (ty, val, tb))
@contextlib.contextmanager
def err_push(self, keep_new=False):
"""
Temporarily save the current error indicator while the code
block executes. If *keep_new* is True and the code block
raises a new error, the new error is kept; otherwise the saved
error indicator is restored at the end of the block.
"""
pty, pval, ptb = [cgutils.alloca_once(self.builder, self.pyobj)
for i in range(3)]
self.err_fetch(pty, pval, ptb)
yield
ty = self.builder.load(pty)
val = self.builder.load(pval)
tb = self.builder.load(ptb)
if keep_new:
new_error = cgutils.is_not_null(self.builder, self.err_occurred())
with self.builder.if_else(new_error, likely=False) as (if_error, if_ok):
with if_error:
# Code block raised an error, keep it
self.decref(ty)
self.decref(val)
self.decref(tb)
with if_ok:
# Restore previous error
self.err_restore(ty, val, tb)
else:
self.err_restore(ty, val, tb)
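# Usage sketch (assumed): run cleanup code without clobbering a pending
# exception; with keep_new=False any error raised inside the block is
# discarded and the saved indicator is restored:
#
#     with pyapi.err_push():
#         pyapi.decref(tmp)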
def get_c_object(self, name):
"""
Get a Python object through its C-accessible *name*
(e.g. "PyExc_ValueError"). The underlying variable must be
a `PyObject *`, and the value of that pointer is returned.
"""
# A LLVM global variable is implicitly a pointer to the declared
# type, so fix up by using pyobj.pointee.
return self.context.get_c_value(self.builder, self.pyobj.pointee, name,
dllimport=True)
def raise_missing_global_error(self, name):
msg = "global name '%s' is not defined" % name
cstr = self.context.insert_const_string(self.module, msg)
self.err_set_string("PyExc_NameError", cstr)
def raise_missing_name_error(self, name):
msg = "name '%s' is not defined" % name
cstr = self.context.insert_const_string(self.module, msg)
self.err_set_string("PyExc_NameError", cstr)
def fatal_error(self, msg):
fnty = Type.function(Type.void(), [self.cstring])
fn = self._get_function(fnty, name="Py_FatalError")
fn.attributes.add("noreturn")
cstr = self.context.insert_const_string(self.module, msg)
self.builder.call(fn, (cstr,))
#
# Concrete dict API
#
def dict_getitem_string(self, dic, name):
"""Lookup name inside dict
Returns a borrowed reference
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyDict_GetItemString")
cstr = self.context.insert_const_string(self.module, name)
return self.builder.call(fn, [dic, cstr])
def dict_getitem(self, dic, name):
"""Lookup name inside dict
Returns a borrowed reference
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyDict_GetItem")
return self.builder.call(fn, [dic, name])
def dict_new(self, presize=0):
if presize == 0:
fnty = Type.function(self.pyobj, ())
fn = self._get_function(fnty, name="PyDict_New")
return self.builder.call(fn, ())
else:
fnty = Type.function(self.pyobj, [self.py_ssize_t])
fn = self._get_function(fnty, name="_PyDict_NewPresized")
return self.builder.call(fn,
[Constant.int(self.py_ssize_t, presize)])
def dict_setitem(self, dictobj, nameobj, valobj):
fnty = Type.function(Type.int(), (self.pyobj, self.pyobj,
self.pyobj))
fn = self._get_function(fnty, name="PyDict_SetItem")
return self.builder.call(fn, (dictobj, nameobj, valobj))
def dict_setitem_string(self, dictobj, name, valobj):
fnty = Type.function(Type.int(), (self.pyobj, self.cstring,
self.pyobj))
fn = self._get_function(fnty, name="PyDict_SetItemString")
cstr = self.context.insert_const_string(self.module, name)
return self.builder.call(fn, (dictobj, cstr, valobj))
def dict_pack(self, keyvalues):
"""
Args
-----
keyvalues: iterable of (str, llvm.Value of PyObject*)
"""
dictobj = self.dict_new()
with self.if_object_ok(dictobj):
for k, v in keyvalues:
self.dict_setitem_string(dictobj, k, v)
return dictobj
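# Usage sketch (editorial, illustrative names): building a small kwargs dict
# for a generic call:
#
#     kws = pyapi.dict_pack([("axis", axis_obj), ("out", out_obj)])
#     res = pyapi.call(callee, args=args_tuple, kws=kws)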
#
# Concrete number APIs
#
def float_from_double(self, fval):
fnty = Type.function(self.pyobj, [self.double])
fn = self._get_function(fnty, name="PyFloat_FromDouble")
return self.builder.call(fn, [fval])
def number_as_ssize_t(self, numobj):
fnty = Type.function(self.py_ssize_t, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyNumber_AsSsize_t")
# We don't want any clipping, so pass OverflowError as the 2nd arg
exc_class = self.get_c_object("PyExc_OverflowError")
return self.builder.call(fn, [numobj, exc_class])
def number_long(self, numobj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Long")
return self.builder.call(fn, [numobj])
def long_as_ulonglong(self, numobj):
fnty = Type.function(self.ulonglong, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsUnsignedLongLong")
return self.builder.call(fn, [numobj])
def long_as_longlong(self, numobj):
fnty = Type.function(self.ulonglong, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsLongLong")
return self.builder.call(fn, [numobj])
def long_as_voidptr(self, numobj):
"""
Convert the given Python integer to a void*. This is recommended
over number_as_ssize_t as it isn't affected by signedness.
"""
fnty = Type.function(self.voidptr, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsVoidPtr")
return self.builder.call(fn, [numobj])
def _long_from_native_int(self, ival, func_name, native_int_type,
signed):
fnty = Type.function(self.pyobj, [native_int_type])
fn = self._get_function(fnty, name=func_name)
resptr = cgutils.alloca_once(self.builder, self.pyobj)
self.builder.store(self.builder.call(fn, [ival]), resptr)
return self.builder.load(resptr)
def long_from_long(self, ival):
func_name = "PyLong_FromLong"
fnty = Type.function(self.pyobj, [self.long])
fn = self._get_function(fnty, name=func_name)
return self.builder.call(fn, [ival])
def long_from_ulong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromUnsignedLong",
self.long, signed=False)
def long_from_ssize_t(self, ival):
return self._long_from_native_int(ival, "PyLong_FromSsize_t",
self.py_ssize_t, signed=True)
def long_from_longlong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromLongLong",
self.longlong, signed=True)
def long_from_ulonglong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromUnsignedLongLong",
self.ulonglong, signed=False)
def long_from_signed_int(self, ival):
"""
Return a Python integer from any native integer value.
"""
bits = ival.type.width
if bits <= self.long.width:
return self.long_from_long(self.builder.sext(ival, self.long))
elif bits <= self.longlong.width:
return self.long_from_longlong(self.builder.sext(ival, self.longlong))
else:
raise OverflowError("integer too big (%d bits)" % (bits))
def long_from_unsigned_int(self, ival):
"""
Same as long_from_signed_int, but for unsigned values.
"""
bits = ival.type.width
if bits <= self.ulong.width:
return self.long_from_ulong(self.builder.zext(ival, self.ulong))
elif bits <= self.ulonglong.width:
return self.long_from_ulonglong(self.builder.zext(ival, self.ulonglong))
else:
raise OverflowError("integer too big (%d bits)" % (bits))
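# Example of the width dispatch above (editorial; assumes a platform where
# c_long is 64 bits wide): an i32 value is sign-/zero-extended and boxed via
# PyLong_FromLong / PyLong_FromUnsignedLong, while a value wider than
# ulonglong raises OverflowError at lowering time:
#
#     obj = pyapi.long_from_signed_int(ival_i32)    # sext, PyLong_FromLong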
def _get_number_operator(self, name):
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyNumber_%s" % name)
return fn
def _call_number_operator(self, name, lhs, rhs, inplace=False):
if inplace:
name = "InPlace" + name
fn = self._get_number_operator(name)
return self.builder.call(fn, [lhs, rhs])
def number_add(self, lhs, rhs, inplace=False):
return self._call_number_operator("Add", lhs, rhs, inplace=inplace)
def number_subtract(self, lhs, rhs, inplace=False):
return self._call_number_operator("Subtract", lhs, rhs, inplace=inplace)
def number_multiply(self, lhs, rhs, inplace=False):
return self._call_number_operator("Multiply", lhs, rhs, inplace=inplace)
def number_truedivide(self, lhs, rhs, inplace=False):
return self._call_number_operator("TrueDivide", lhs, rhs, inplace=inplace)
def number_floordivide(self, lhs, rhs, inplace=False):
return self._call_number_operator("FloorDivide", lhs, rhs, inplace=inplace)
def number_remainder(self, lhs, rhs, inplace=False):
return self._call_number_operator("Remainder", lhs, rhs, inplace=inplace)
def number_matrix_multiply(self, lhs, rhs, inplace=False):
return self._call_number_operator("MatrixMultiply", lhs, rhs, inplace=inplace)
def number_lshift(self, lhs, rhs, inplace=False):
return self._call_number_operator("Lshift", lhs, rhs, inplace=inplace)
def number_rshift(self, lhs, rhs, inplace=False):
return self._call_number_operator("Rshift", lhs, rhs, inplace=inplace)
def number_and(self, lhs, rhs, inplace=False):
return self._call_number_operator("And", lhs, rhs, inplace=inplace)
def number_or(self, lhs, rhs, inplace=False):
return self._call_number_operator("Or", lhs, rhs, inplace=inplace)
def number_xor(self, lhs, rhs, inplace=False):
return self._call_number_operator("Xor", lhs, rhs, inplace=inplace)
def number_power(self, lhs, rhs, inplace=False):
fnty = Type.function(self.pyobj, [self.pyobj] * 3)
fname = "PyNumber_InPlacePower" if inplace else "PyNumber_Power"
fn = self._get_function(fnty, fname)
return self.builder.call(fn, [lhs, rhs, self.borrow_none()])
def number_negative(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Negative")
return self.builder.call(fn, (obj,))
def number_positive(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Positive")
return self.builder.call(fn, (obj,))
def number_float(self, val):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Float")
return self.builder.call(fn, [val])
def number_invert(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Invert")
return self.builder.call(fn, (obj,))
def float_as_double(self, fobj):
fnty = Type.function(self.double, [self.pyobj])
fn = self._get_function(fnty, name="PyFloat_AsDouble")
return self.builder.call(fn, [fobj])
def bool_from_bool(self, bval):
"""
Get a Python bool from an LLVM boolean.
"""
longval = self.builder.zext(bval, self.long)
return self.bool_from_long(longval)
def bool_from_long(self, ival):
fnty = Type.function(self.pyobj, [self.long])
fn = self._get_function(fnty, name="PyBool_FromLong")
return self.builder.call(fn, [ival])
def complex_from_doubles(self, realval, imagval):
fnty = Type.function(self.pyobj, [Type.double(), Type.double()])
fn = self._get_function(fnty, name="PyComplex_FromDoubles")
return self.builder.call(fn, [realval, imagval])
def complex_real_as_double(self, cobj):
fnty = Type.function(Type.double(), [self.pyobj])
fn = self._get_function(fnty, name="PyComplex_RealAsDouble")
return self.builder.call(fn, [cobj])
def complex_imag_as_double(self, cobj):
fnty = Type.function(Type.double(), [self.pyobj])
fn = self._get_function(fnty, name="PyComplex_ImagAsDouble")
return self.builder.call(fn, [cobj])
#
# Concrete slice API
#
def slice_as_ints(self, obj):
"""
Read the members of a slice of integers.
Returns an (ok, start, stop, step) tuple where ok is a boolean and
the remaining members are pointer-sized ints.
"""
pstart = cgutils.alloca_once(self.builder, self.py_ssize_t)
pstop = cgutils.alloca_once(self.builder, self.py_ssize_t)
pstep = cgutils.alloca_once(self.builder, self.py_ssize_t)
fnty = Type.function(Type.int(),
[self.pyobj] + [self.py_ssize_t.as_pointer()] * 3)
fn = self._get_function(fnty, name="numba_unpack_slice")
res = self.builder.call(fn, (obj, pstart, pstop, pstep))
start = self.builder.load(pstart)
stop = self.builder.load(pstop)
step = self.builder.load(pstep)
return cgutils.is_null(self.builder, res), start, stop, step
#
# List and sequence APIs
#
def sequence_getslice(self, obj, start, stop):
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t,
self.py_ssize_t])
fn = self._get_function(fnty, name="PySequence_GetSlice")
return self.builder.call(fn, (obj, start, stop))
def sequence_tuple(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PySequence_Tuple")
return self.builder.call(fn, [obj])
def list_new(self, szval):
fnty = Type.function(self.pyobj, [self.py_ssize_t])
fn = self._get_function(fnty, name="PyList_New")
return self.builder.call(fn, [szval])
def list_size(self, lst):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PyList_Size")
return self.builder.call(fn, [lst])
def list_append(self, lst, val):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyList_Append")
return self.builder.call(fn, [lst, val])
def list_setitem(self, lst, idx, val):
"""
Warning: Steals reference to ``val``
"""
fnty = Type.function(Type.int(), [self.pyobj, self.py_ssize_t,
self.pyobj])
fn = self._get_function(fnty, name="PyList_SetItem")
return self.builder.call(fn, [lst, idx, val])
def list_getitem(self, lst, idx):
"""
Returns a borrowed reference.
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t])
fn = self._get_function(fnty, name="PyList_GetItem")
if isinstance(idx, int):
idx = self.context.get_constant(types.intp, idx)
return self.builder.call(fn, [lst, idx])
def list_setslice(self, lst, start, stop, obj):
if obj is None:
obj = self.get_null_object()
fnty = Type.function(Type.int(), [self.pyobj, self.py_ssize_t,
self.py_ssize_t, self.pyobj])
fn = self._get_function(fnty, name="PyList_SetSlice")
return self.builder.call(fn, (lst, start, stop, obj))
#
# Concrete tuple API
#
def tuple_getitem(self, tup, idx):
"""
Returns a borrowed reference.
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t])
fn = self._get_function(fnty, name="PyTuple_GetItem")
idx = self.context.get_constant(types.intp, idx)
return self.builder.call(fn, [tup, idx])
def tuple_pack(self, items):
fnty = Type.function(self.pyobj, [self.py_ssize_t], var_arg=True)
fn = self._get_function(fnty, name="PyTuple_Pack")
n = self.context.get_constant(types.intp, len(items))
args = [n]
args.extend(items)
return self.builder.call(fn, args)
def tuple_size(self, tup):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PyTuple_Size")
return self.builder.call(fn, [tup])
def tuple_new(self, count):
fnty = Type.function(self.pyobj, [Type.int()])
fn = self._get_function(fnty, name='PyTuple_New')
return self.builder.call(fn, [self.context.get_constant(types.int32,
count)])
def tuple_setitem(self, tuple_val, index, item):
"""
Steals a reference to `item`.
"""
fnty = Type.function(Type.int(), [self.pyobj, Type.int(), self.pyobj])
setitem_fn = self._get_function(fnty, name='PyTuple_SetItem')
index = self.context.get_constant(types.int32, index)
self.builder.call(setitem_fn, [tuple_val, index, item])
#
# Concrete set API
#
def set_new(self, iterable=None):
if iterable is None:
iterable = self.get_null_object()
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PySet_New")
return self.builder.call(fn, [iterable])
def set_add(self, set, value):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PySet_Add")
return self.builder.call(fn, [set, value])
def set_clear(self, set):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PySet_Clear")
return self.builder.call(fn, [set])
def set_size(self, set):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PySet_Size")
return self.builder.call(fn, [set])
def set_update(self, set, iterable):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="_PySet_Update")
return self.builder.call(fn, [set, iterable])
def set_next_entry(self, set, posptr, keyptr, hashptr):
fnty = Type.function(Type.int(),
[self.pyobj, self.py_ssize_t.as_pointer(),
self.pyobj.as_pointer(), self.py_hash_t.as_pointer()])
fn = self._get_function(fnty, name="_PySet_NextEntry")
return self.builder.call(fn, (set, posptr, keyptr, hashptr))
@contextlib.contextmanager
def set_iterate(self, set):
builder = self.builder
hashptr = cgutils.alloca_once(builder, self.py_hash_t, name="hashptr")
keyptr = cgutils.alloca_once(builder, self.pyobj, name="keyptr")
posptr = cgutils.alloca_once_value(builder,
ir.Constant(self.py_ssize_t, 0),
name="posptr")
bb_body = builder.append_basic_block("bb_body")
bb_end = builder.append_basic_block("bb_end")
builder.branch(bb_body)
def do_break():
builder.branch(bb_end)
with builder.goto_block(bb_body):
r = self.set_next_entry(set, posptr, keyptr, hashptr)
finished = cgutils.is_null(builder, r)
with builder.if_then(finished, likely=False):
builder.branch(bb_end)
yield _IteratorLoop(builder.load(keyptr), do_break)
builder.branch(bb_body)
builder.position_at_end(bb_end)
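# Usage sketch (assumed): iterating a set during unboxing; `loop.value` is a
# borrowed reference to the current entry and `loop.do_break()` exits early:
#
#     with pyapi.set_iterate(setobj) as loop:
#         item = loop.value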
#
# GIL APIs
#
def gil_ensure(self):
"""
Ensure the GIL is acquired.
The returned value must be consumed by gil_release().
"""
gilptrty = Type.pointer(self.gil_state)
fnty = Type.function(Type.void(), [gilptrty])
fn = self._get_function(fnty, "numba_gil_ensure")
gilptr = cgutils.alloca_once(self.builder, self.gil_state)
self.builder.call(fn, [gilptr])
return gilptr
def gil_release(self, gil):
"""
Release the GIL previously acquired by gil_ensure().
Must be paired with exactly one gil_ensure() call.
"""
gilptrty = Type.pointer(self.gil_state)
fnty = Type.function(Type.void(), [gilptrty])
fn = self._get_function(fnty, "numba_gil_release")
return self.builder.call(fn, [gil])
def save_thread(self):
"""
Release the GIL and return the former thread state
(an opaque non-NULL pointer).
"""
fnty = Type.function(self.voidptr, [])
fn = self._get_function(fnty, name="PyEval_SaveThread")
return self.builder.call(fn, [])
def restore_thread(self, thread_state):
"""
Restore the given thread state by reacquiring the GIL.
"""
fnty = Type.function(Type.void(), [self.voidptr])
fn = self._get_function(fnty, name="PyEval_RestoreThread")
self.builder.call(fn, [thread_state])
#
# Generic object private data (a way of associating an arbitrary void *
# pointer to an arbitrary Python object).
#
def object_get_private_data(self, obj):
fnty = Type.function(self.voidptr, [self.pyobj])
fn = self._get_function(fnty, name="numba_get_pyobject_private_data")
return self.builder.call(fn, (obj,))
def object_set_private_data(self, obj, ptr):
fnty = Type.function(Type.void(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="numba_set_pyobject_private_data")
return self.builder.call(fn, (obj, ptr))
def object_reset_private_data(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="numba_reset_pyobject_private_data")
return self.builder.call(fn, (obj,))
#
# Other APIs (organize them better!)
#
def import_module_noblock(self, modname):
fnty = Type.function(self.pyobj, [self.cstring])
fn = self._get_function(fnty, name="PyImport_ImportModuleNoBlock")
return self.builder.call(fn, [modname])
def call_function_objargs(self, callee, objargs):
fnty = Type.function(self.pyobj, [self.pyobj], var_arg=True)
fn = self._get_function(fnty, name="PyObject_CallFunctionObjArgs")
args = [callee] + list(objargs)
args.append(self.context.get_constant_null(types.pyobject))
return self.builder.call(fn, args)
def call_method(self, callee, method, objargs=()):
cname = self.context.insert_const_string(self.module, method)
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring, self.cstring],
var_arg=True)
fn = self._get_function(fnty, name="PyObject_CallMethod")
fmt = 'O' * len(objargs)
cfmt = self.context.insert_const_string(self.module, fmt)
args = [callee, cname, cfmt]
if objargs:
args.extend(objargs)
args.append(self.context.get_constant_null(types.pyobject))
return self.builder.call(fn, args)
def call(self, callee, args=None, kws=None):
if args is None:
args = self.get_null_object()
if kws is None:
kws = self.get_null_object()
fnty = Type.function(self.pyobj, [self.pyobj] * 3)
fn = self._get_function(fnty, name="PyObject_Call")
return self.builder.call(fn, (callee, args, kws))
def object_istrue(self, obj):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PyObject_IsTrue")
return self.builder.call(fn, [obj])
def object_not(self, obj):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PyObject_Not")
return self.builder.call(fn, [obj])
def object_richcompare(self, lhs, rhs, opstr):
"""
Refer to the Python source (Include/object.h) for the macro
definitions of the opid values.
"""
ops = ['<', '<=', '==', '!=', '>', '>=']
if opstr in ops:
opid = ops.index(opstr)
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj, Type.int()])
fn = self._get_function(fnty, name="PyObject_RichCompare")
lopid = self.context.get_constant(types.int32, opid)
return self.builder.call(fn, (lhs, rhs, lopid))
elif opstr == 'is':
bitflag = self.builder.icmp(lc.ICMP_EQ, lhs, rhs)
return self.bool_from_bool(bitflag)
elif opstr == 'is not':
bitflag = self.builder.icmp(lc.ICMP_NE, lhs, rhs)
return self.bool_from_bool(bitflag)
elif opstr in ('in', 'not in'):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PySequence_Contains")
status = self.builder.call(fn, (rhs, lhs))
negone = self.context.get_constant(types.int32, -1)
is_good = self.builder.icmp(lc.ICMP_NE, status, negone)
# Stack allocate output and initialize to Null
outptr = cgutils.alloca_once_value(self.builder,
Constant.null(self.pyobj))
# If PySequence_Contains returns non-error value
with cgutils.if_likely(self.builder, is_good):
if opstr == 'not in':
status = self.builder.not_(status)
# Store the status as a boolean object
truncated = self.builder.trunc(status, Type.int(1))
self.builder.store(self.bool_from_bool(truncated),
outptr)
return self.builder.load(outptr)
else:
raise NotImplementedError("Unknown operator {op!r}".format(
op=opstr))
def iter_next(self, iterobj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyIter_Next")
return self.builder.call(fn, [iterobj])
def object_getiter(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetIter")
return self.builder.call(fn, [obj])
def object_getattr_string(self, obj, attr):
cstr = self.context.insert_const_string(self.module, attr)
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyObject_GetAttrString")
return self.builder.call(fn, [obj, cstr])
def object_getattr(self, obj, attr):
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetAttr")
return self.builder.call(fn, [obj, attr])
def object_setattr_string(self, obj, attr, val):
cstr = self.context.insert_const_string(self.module, attr)
fnty = Type.function(Type.int(), [self.pyobj, self.cstring, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetAttrString")
return self.builder.call(fn, [obj, cstr, val])
def object_setattr(self, obj, attr, val):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetAttr")
return self.builder.call(fn, [obj, attr, val])
def object_delattr_string(self, obj, attr):
# PyObject_DelAttrString() is actually a C macro calling
# PyObject_SetAttrString() with value == NULL.
return self.object_setattr_string(obj, attr, self.get_null_object())
def object_delattr(self, obj, attr):
# PyObject_DelAttr() is actually a C macro calling
# PyObject_SetAttr() with value == NULL.
return self.object_setattr(obj, attr, self.get_null_object())
def object_getitem(self, obj, key):
"""
Return obj[key]
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetItem")
return self.builder.call(fn, (obj, key))
def object_setitem(self, obj, key, val):
"""
obj[key] = val
"""
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetItem")
return self.builder.call(fn, (obj, key, val))
def object_delitem(self, obj, key):
"""
del obj[key]
"""
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_DelItem")
return self.builder.call(fn, (obj, key))
def string_as_string(self, strobj):
fnty = Type.function(self.cstring, [self.pyobj])
fname = "PyUnicode_AsUTF8"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [strobj])
def string_as_string_and_size(self, strobj):
"""
Returns a tuple of ``(ok, buffer, length)``.
``ok`` is an i1 value that is set if the conversion succeeded.
``buffer`` is an i8* pointing to the output buffer.
``length`` is an i32/i64 (py_ssize_t) holding the length of the buffer.
"""
p_length = cgutils.alloca_once(self.builder, self.py_ssize_t)
fnty = Type.function(self.cstring, [self.pyobj,
self.py_ssize_t.as_pointer()])
fname = "PyUnicode_AsUTF8AndSize"
fn = self._get_function(fnty, name=fname)
buffer = self.builder.call(fn, [strobj, p_length])
ok = self.builder.icmp_unsigned('!=',
ir.Constant(buffer.type, None),
buffer)
return (ok, buffer, self.builder.load(p_length))
def string_as_string_size_and_kind(self, strobj):
"""
Returns a tuple of ``(ok, buffer, length, kind, is_ascii, hash)``.
``ok`` is an i1 value that is set if the conversion succeeded.
``buffer`` is an i8* pointing to the output buffer.
``length`` is an i32/i64 (py_ssize_t) holding the length of the buffer.
``kind`` is an i32 (int32) holding the Unicode kind constant.
``is_ascii`` is an i32 (int32) flag set if the string is ASCII-only.
``hash`` is a py_ssize_t (py_hash_t) holding the string's cached hash.
"""
p_length = cgutils.alloca_once(self.builder, self.py_ssize_t)
p_kind = cgutils.alloca_once(self.builder, Type.int())
p_ascii = cgutils.alloca_once(self.builder, Type.int())
p_hash = cgutils.alloca_once(self.builder, self.py_hash_t)
fnty = Type.function(self.cstring, [self.pyobj,
self.py_ssize_t.as_pointer(),
Type.int().as_pointer(),
Type.int().as_pointer(),
self.py_hash_t.as_pointer()])
fname = "numba_extract_unicode"
fn = self._get_function(fnty, name=fname)
buffer = self.builder.call(
fn, [strobj, p_length, p_kind, p_ascii, p_hash])
ok = self.builder.icmp_unsigned('!=',
ir.Constant(buffer.type, None),
buffer)
return (ok, buffer, self.builder.load(p_length),
self.builder.load(p_kind), self.builder.load(p_ascii),
self.builder.load(p_hash))
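# Usage sketch (assumed): the 6-tuple drives unicode unboxing; `kind` selects
# the code-unit width (see the PY_UNICODE_*_KIND constants near the imports):
#
#     ok, buf, n, kind, is_ascii, h = pyapi.string_as_string_size_and_kind(s)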
def string_from_string_and_size(self, string, size):
fnty = Type.function(self.pyobj, [self.cstring, self.py_ssize_t])
fname = "PyString_FromStringAndSize"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string, size])
def string_from_string(self, string):
fnty = Type.function(self.pyobj, [self.cstring])
fname = "PyUnicode_FromString"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string])
def string_from_kind_and_data(self, kind, string, size):
fnty = Type.function(self.pyobj, [Type.int(), self.cstring, self.py_ssize_t])
fname = "PyUnicode_FromKindAndData"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [kind, string, size])
def bytes_from_string_and_size(self, string, size):
fnty = Type.function(self.pyobj, [self.cstring, self.py_ssize_t])
fname = "PyBytes_FromStringAndSize"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string, size])
def object_hash(self, obj):
fnty = Type.function(self.py_hash_t, [self.pyobj,])
fname = "PyObject_Hash"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [obj,])
def object_str(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyObject_Str")
return self.builder.call(fn, [obj])
def make_none(self):
obj = self.borrow_none()
self.incref(obj)
return obj
def borrow_none(self):
return self.get_c_object("_Py_NoneStruct")
def sys_write_stdout(self, fmt, *args):
fnty = Type.function(Type.void(), [self.cstring], var_arg=True)
fn = self._get_function(fnty, name="PySys_FormatStdout")
return self.builder.call(fn, (fmt,) + args)
def object_dump(self, obj):
"""
Dump a Python object on C stderr. For debugging purposes.
"""
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="_PyObject_Dump")
return self.builder.call(fn, (obj,))
#
# NRT (Numba runtime) APIs
#
def nrt_adapt_ndarray_to_python(self, aryty, ary, dtypeptr):
assert self.context.enable_nrt, "NRT required"
intty = ir.IntType(32)
fnty = Type.function(self.pyobj,
[self.voidptr, intty, intty, self.pyobj])
fn = self._get_function(fnty, name="NRT_adapt_ndarray_to_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
ndim = self.context.get_constant(types.int32, aryty.ndim)
writable = self.context.get_constant(types.int32, int(aryty.mutable))
aryptr = cgutils.alloca_once_value(self.builder, ary)
return self.builder.call(fn, [self.builder.bitcast(aryptr,
self.voidptr),
ndim, writable, dtypeptr])
def nrt_meminfo_new_from_pyobject(self, data, pyobj):
"""
Allocate a new MemInfo with data payload borrowed from a python
object.
"""
mod = self.builder.module
fnty = ir.FunctionType(
cgutils.voidptr_t,
[cgutils.voidptr_t, cgutils.voidptr_t],
)
fn = mod.get_or_insert_function(
fnty,
name="NRT_meminfo_new_from_pyobject",
)
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [data, pyobj])
def nrt_meminfo_as_pyobject(self, miptr):
mod = self.builder.module
fnty = ir.FunctionType(
self.pyobj,
[cgutils.voidptr_t]
)
fn = mod.get_or_insert_function(
fnty,
name='NRT_meminfo_as_pyobject',
)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [miptr])
def nrt_meminfo_from_pyobject(self, miobj):
mod = self.builder.module
fnty = ir.FunctionType(
cgutils.voidptr_t,
[self.pyobj]
)
fn = mod.get_or_insert_function(
fnty,
name='NRT_meminfo_from_pyobject',
)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [miobj])
def nrt_adapt_ndarray_from_python(self, ary, ptr):
assert self.context.enable_nrt
fnty = Type.function(Type.int(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="NRT_adapt_ndarray_from_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (ary, ptr))
def nrt_adapt_buffer_from_python(self, buf, ptr):
assert self.context.enable_nrt
fnty = Type.function(Type.void(), [Type.pointer(self.py_buffer_t),
self.voidptr])
fn = self._get_function(fnty, name="NRT_adapt_buffer_from_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (buf, ptr))
# ------ utils -----
def _get_function(self, fnty, name):
return self.module.get_or_insert_function(fnty, name=name)
def alloca_obj(self):
return self.builder.alloca(self.pyobj)
def alloca_buffer(self):
"""
Return a pointer to a stack-allocated, zero-initialized Py_buffer.
"""
# Treat the buffer as an opaque array of bytes
ptr = cgutils.alloca_once_value(self.builder,
lc.Constant.null(self.py_buffer_t))
return ptr
@contextlib.contextmanager
def if_object_ok(self, obj):
with cgutils.if_likely(self.builder,
cgutils.is_not_null(self.builder, obj)):
yield
def print_object(self, obj):
strobj = self.object_str(obj)
cstr = self.string_as_string(strobj)
fmt = self.context.insert_const_string(self.module, "%s")
self.sys_write_stdout(fmt, cstr)
self.decref(strobj)
def print_string(self, text):
fmt = self.context.insert_const_string(self.module, text)
self.sys_write_stdout(fmt)
def get_null_object(self):
return Constant.null(self.pyobj)
def return_none(self):
none = self.make_none()
self.builder.ret(none)
def list_pack(self, items):
n = len(items)
seq = self.list_new(self.context.get_constant(types.intp, n))
with self.if_object_ok(seq):
for i in range(n):
idx = self.context.get_constant(types.intp, i)
self.incref(items[i])
self.list_setitem(seq, idx, items[i])
return seq
def unserialize(self, structptr):
"""
        Unserialize some data. *structptr* should be a pointer to
        a {i8* data, i32 length, i8* hashbuf} structure, as produced
        by serialize_object().
"""
fnty = Type.function(self.pyobj,
(self.voidptr, ir.IntType(32), self.voidptr))
fn = self._get_function(fnty, name="numba_unpickle")
ptr = self.builder.extract_value(self.builder.load(structptr), 0)
n = self.builder.extract_value(self.builder.load(structptr), 1)
hashed = self.builder.extract_value(self.builder.load(structptr), 2)
return self.builder.call(fn, (ptr, n, hashed))
def serialize_uncached(self, obj):
"""
        Same as serialize_object(), but doesn't create a global variable;
        instead returns a literal {i8* data, i32 length, i8* hashbuf} structure.
"""
# First make the array constant
data = serialize.dumps(obj)
assert len(data) < 2**31
name = ".const.pickledata.%s" % (id(obj) if config.DIFF_IR == 0 else "DIFF_IR")
bdata = cgutils.make_bytearray(data)
# Make SHA1 hash on the pickled content
# NOTE: update buffer size in numba_unpickle() when changing the
# hash algorithm.
hashed = cgutils.make_bytearray(hashlib.sha1(data).digest())
arr = self.context.insert_unique_const(self.module, name, bdata)
hasharr = self.context.insert_unique_const(
self.module, f"{name}.sha1", hashed,
)
# Then populate the structure constant
struct = ir.Constant.literal_struct([
arr.bitcast(self.voidptr),
ir.Constant(ir.IntType(32), arr.type.pointee.count),
hasharr.bitcast(self.voidptr),
])
return struct
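For reference, a hedged standalone reconstruction of the {i8* data, i32 length, i8* hashbuf} constant built above, with plain pickle/hashlib standing in for serialize.dumps and the cgutils/context wrappers:

# Sketch: build the literal struct constant in bare llvmlite.
import hashlib
import pickle
import llvmlite.ir as ir

mod = ir.Module(name="sketch")
i8, i32 = ir.IntType(8), ir.IntType(32)
i8p = i8.as_pointer()

def make_bytes_global(name, payload):
    # a stand-in for cgutils.make_bytearray + insert_unique_const
    arrty = ir.ArrayType(i8, len(payload))
    gv = ir.GlobalVariable(mod, arrty, name=name)
    gv.initializer = ir.Constant(arrty, bytearray(payload))
    gv.global_constant = True
    return gv

data = pickle.dumps((1, 2, 3))            # stand-in for serialize.dumps
arr = make_bytes_global(".const.pickledata.demo", data)
hasharr = make_bytes_global(".const.pickledata.demo.sha1",
                            hashlib.sha1(data).digest())
struct = ir.Constant.literal_struct([
    arr.bitcast(i8p),                      # i8* data
    ir.Constant(i32, len(data)),           # i32 length
    hasharr.bitcast(i8p),                  # i8* hashbuf
])
print(struct)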
def serialize_object(self, obj):
"""
        Serialize the given object in the bitcode, and return it
        as a pointer to a {i8* data, i32 length, i8* hashbuf} structure
        constant (suitable for passing to unserialize()).
"""
try:
gv = self.module.__serialized[obj]
except KeyError:
struct = self.serialize_uncached(obj)
name = ".const.picklebuf.%s" % (id(obj) if config.DIFF_IR == 0 else "DIFF_IR")
gv = self.context.insert_unique_const(self.module, name, struct)
# Make the id() (and hence the name) unique while populating the module.
self.module.__serialized[obj] = gv
return gv
def c_api_error(self):
return cgutils.is_not_null(self.builder, self.err_occurred())
def to_native_value(self, typ, obj):
"""
Unbox the Python object as the given Numba type.
A NativeValue instance is returned.
"""
from numba.core.boxing import unbox_unsupported
impl = _unboxers.lookup(typ.__class__, unbox_unsupported)
c = _UnboxContext(self.context, self.builder, self)
return impl(typ, obj, c)
def from_native_return(self, typ, val, env_manager):
assert not isinstance(typ, types.Optional), "callconv should have " \
"prevented the return of " \
"optional value"
out = self.from_native_value(typ, val, env_manager)
return out
def from_native_value(self, typ, val, env_manager=None):
"""
Box the native value of the given Numba type. A Python object
pointer is returned (NULL if an error occurred).
This method steals any native (NRT) reference embedded in *val*.
"""
from numba.core.boxing import box_unsupported
impl = _boxers.lookup(typ.__class__, box_unsupported)
c = _BoxContext(self.context, self.builder, self, env_manager)
return impl(typ, val, c)
def reflect_native_value(self, typ, val, env_manager=None):
"""
Reflect the native value onto its Python original, if any.
An error bit (as an LLVM value) is returned.
"""
impl = _reflectors.lookup(typ.__class__)
if impl is None:
# Reflection isn't needed for most types
return cgutils.false_bit
is_error = cgutils.alloca_once_value(self.builder, cgutils.false_bit)
c = _ReflectContext(self.context, self.builder, self, env_manager,
is_error)
impl(typ, val, c)
return self.builder.load(c.is_error)
def to_native_generator(self, obj, typ):
"""
Extract the generator structure pointer from a generator *obj*
(a _dynfunc.Generator instance).
"""
gen_ptr_ty = Type.pointer(self.context.get_data_type(typ))
value = self.context.get_generator_state(self.builder, obj, gen_ptr_ty)
return NativeValue(value)
def from_native_generator(self, val, typ, env=None):
"""
Make a Numba generator (a _dynfunc.Generator instance) from a
generator structure pointer *val*.
*env* is an optional _dynfunc.Environment instance to be wrapped
in the generator.
"""
llty = self.context.get_data_type(typ)
assert not llty.is_pointer
gen_struct_size = self.context.get_abi_sizeof(llty)
gendesc = self.context.get_generator_desc(typ)
# This is the PyCFunctionWithKeywords generated by PyCallWrapper
genfnty = Type.function(self.pyobj, [self.pyobj, self.pyobj, self.pyobj])
genfn = self._get_function(genfnty, name=gendesc.llvm_cpython_wrapper_name)
# This is the raw finalizer generated by _lower_generator_finalize_func()
finalizerty = Type.function(Type.void(), [self.voidptr])
if typ.has_finalizer:
finalizer = self._get_function(finalizerty, name=gendesc.llvm_finalizer_name)
else:
finalizer = Constant.null(Type.pointer(finalizerty))
# PyObject *numba_make_generator(state_size, initial_state, nextfunc, finalizer, env)
fnty = Type.function(self.pyobj, [self.py_ssize_t,
self.voidptr,
Type.pointer(genfnty),
Type.pointer(finalizerty),
self.voidptr])
fn = self._get_function(fnty, name="numba_make_generator")
state_size = ir.Constant(self.py_ssize_t, gen_struct_size)
initial_state = self.builder.bitcast(val, self.voidptr)
if env is None:
env = self.get_null_object()
env = self.builder.bitcast(env, self.voidptr)
return self.builder.call(fn,
(state_size, initial_state, genfn, finalizer, env))
def numba_array_adaptor(self, ary, ptr):
assert not self.context.enable_nrt
fnty = Type.function(Type.int(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="numba_adapt_ndarray")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (ary, ptr))
def numba_buffer_adaptor(self, buf, ptr):
fnty = Type.function(Type.void(),
[ir.PointerType(self.py_buffer_t), self.voidptr])
fn = self._get_function(fnty, name="numba_adapt_buffer")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (buf, ptr))
def complex_adaptor(self, cobj, cmplx):
fnty = Type.function(Type.int(), [self.pyobj, cmplx.type])
fn = self._get_function(fnty, name="numba_complex_adaptor")
return self.builder.call(fn, [cobj, cmplx])
def extract_record_data(self, obj, pbuf):
fnty = Type.function(self.voidptr,
[self.pyobj, ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_extract_record_data")
return self.builder.call(fn, [obj, pbuf])
def get_buffer(self, obj, pbuf):
fnty = Type.function(Type.int(),
[self.pyobj, ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_get_buffer")
return self.builder.call(fn, [obj, pbuf])
def release_buffer(self, pbuf):
fnty = Type.function(Type.void(), [ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_release_buffer")
return self.builder.call(fn, [pbuf])
def extract_np_datetime(self, obj):
fnty = Type.function(Type.int(64), [self.pyobj])
fn = self._get_function(fnty, name="numba_extract_np_datetime")
return self.builder.call(fn, [obj])
def extract_np_timedelta(self, obj):
fnty = Type.function(Type.int(64), [self.pyobj])
fn = self._get_function(fnty, name="numba_extract_np_timedelta")
return self.builder.call(fn, [obj])
def create_np_datetime(self, val, unit_code):
unit_code = Constant.int(Type.int(), unit_code)
fnty = Type.function(self.pyobj, [Type.int(64), Type.int()])
fn = self._get_function(fnty, name="numba_create_np_datetime")
return self.builder.call(fn, [val, unit_code])
def create_np_timedelta(self, val, unit_code):
unit_code = Constant.int(Type.int(), unit_code)
fnty = Type.function(self.pyobj, [Type.int(64), Type.int()])
fn = self._get_function(fnty, name="numba_create_np_timedelta")
return self.builder.call(fn, [val, unit_code])
def recreate_record(self, pdata, size, dtype, env_manager):
fnty = Type.function(self.pyobj, [Type.pointer(Type.int(8)),
Type.int(), self.pyobj])
fn = self._get_function(fnty, name="numba_recreate_record")
dtypeaddr = env_manager.read_const(env_manager.add_const(dtype))
return self.builder.call(fn, [pdata, size, dtypeaddr])
def string_from_constant_string(self, string):
cstr = self.context.insert_const_string(self.module, string)
sz = self.context.get_constant(types.intp, len(string))
return self.string_from_string_and_size(cstr, sz)
def call_jit_code(self, func, sig, args):
"""Calls into Numba jitted code and propagate error using the Python
calling convention.
Parameters
----------
func : function
The Python function to be compiled. This function is compiled
in nopython-mode.
sig : numba.typing.Signature
The function signature for *func*.
args : Sequence[llvmlite.binding.Value]
LLVM values to use as arguments.
Returns
-------
(is_error, res) : 2-tuple of llvmlite.binding.Value.
is_error : true iff *func* raised an exception.
res : Returned value from *func* iff *is_error* is false.
If *is_error* is true, this method will adapt the nopython exception
into a Python exception. Caller should return NULL to Python to
indicate an error.
"""
# Compile *func*
builder = self.builder
cres = self.context.compile_subroutine(builder, func, sig)
got_retty = cres.signature.return_type
retty = sig.return_type
if got_retty != retty:
# This error indicates an error in *func* or the caller of this
# method.
raise errors.LoweringError(
f'mismatching signature {got_retty} != {retty}.\n'
)
# Call into *func*
status, res = self.context.call_internal_no_propagate(
builder, cres.fndesc, sig, args,
)
# Post-call handling for *func*
is_error_ptr = cgutils.alloca_once(builder, cgutils.bool_t, zfill=True)
res_type = self.context.get_value_type(sig.return_type)
res_ptr = cgutils.alloca_once(builder, res_type, zfill=True)
# Handle error and adapt the nopython exception into cpython exception
with builder.if_else(status.is_error) as (has_err, no_err):
with has_err:
builder.store(status.is_error, is_error_ptr)
# Set error state in the Python interpreter
self.context.call_conv.raise_error(builder, self, status)
with no_err:
# Handle returned value
res = imputils.fix_returning_optional(
self.context, builder, sig, status, res,
)
builder.store(res, res_ptr)
is_error = builder.load(is_error_ptr)
res = builder.load(res_ptr)
return is_error, res
class ObjModeUtils:
"""Internal utils for calling objmode dispatcher from within NPM code.
"""
def __init__(self, pyapi):
self.pyapi = pyapi
def load_dispatcher(self, fnty, argtypes):
builder = self.pyapi.builder
tyctx = self.pyapi.context
m = builder.module
# Add a global variable to cache the objmode dispatcher
gv = ir.GlobalVariable(
m, self.pyapi.pyobj,
name=m.get_unique_name("cached_objmode_dispatcher"),
)
gv.initializer = gv.type.pointee(None)
gv.linkage = 'internal'
cached = builder.load(gv)
with builder.if_then(cgutils.is_null(builder, cached)):
if serialize.is_serialiable(fnty.dispatcher):
cls = type(self)
compiler = self.pyapi.unserialize(
self.pyapi.serialize_object(cls._call_objmode_dispatcher)
)
serialized_dispatcher = self.pyapi.serialize_object(
(fnty.dispatcher, tuple(argtypes)),
)
compile_args = self.pyapi.unserialize(serialized_dispatcher)
callee = self.pyapi.call_function_objargs(
compiler, [compile_args],
)
# Clean up
self.pyapi.decref(compiler)
self.pyapi.decref(compile_args)
else:
entry_pt = fnty.dispatcher.compile(tuple(argtypes))
callee = tyctx.add_dynamic_addr(
builder, id(entry_pt), info="with_objectmode",
)
# Incref the dispatcher and cache it
self.pyapi.incref(callee)
builder.store(callee, gv)
callee = builder.load(gv)
return callee
@staticmethod
def _call_objmode_dispatcher(compile_args):
dispatcher, argtypes = compile_args
entrypt = dispatcher.compile(argtypes)
return entrypt
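The caching scheme in load_dispatcher (an internal global holding a lazily populated PyObject*) can be sketched standalone. A minimal llvmlite reconstruction; names are illustrative and the first-use branch stores a placeholder instead of a real dispatcher:

# Sketch: null-initialized internal global, populated on first use.
import llvmlite.ir as ir

mod = ir.Module(name="sketch")
i8p = ir.IntType(8).as_pointer()

fn = ir.Function(mod, ir.FunctionType(i8p, []), name="get_cached")
builder = ir.IRBuilder(fn.append_basic_block("entry"))

gv = ir.GlobalVariable(mod, i8p, name="cached_objmode_dispatcher")
gv.initializer = ir.Constant(i8p, None)
gv.linkage = "internal"

cached = builder.load(gv)
is_null = builder.icmp_unsigned("==", cached, ir.Constant(i8p, None))
with builder.if_then(is_null):
    # First use: a real implementation would build the dispatcher here;
    # the sketch stores a placeholder null to stay self-contained.
    builder.store(ir.Constant(i8p, None), gv)
builder.ret(builder.load(gv))
print(mod)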
|
from_native_value
|
Box the native value of the given Numba type. A Python object
pointer is returned (NULL if an error occurred).
This method steals any native (NRT) reference embedded in *val*.
|
from collections import namedtuple
import contextlib
import pickle
import hashlib
from llvmlite import ir
from llvmlite.llvmpy.core import Type, Constant
import llvmlite.llvmpy.core as lc
import ctypes
from numba import _helperlib
from numba.core import (
types, utils, config, lowering, cgutils, imputils, serialize,
)
PY_UNICODE_1BYTE_KIND = _helperlib.py_unicode_1byte_kind
PY_UNICODE_2BYTE_KIND = _helperlib.py_unicode_2byte_kind
PY_UNICODE_4BYTE_KIND = _helperlib.py_unicode_4byte_kind
PY_UNICODE_WCHAR_KIND = _helperlib.py_unicode_wchar_kind
class _Registry(object):
def __init__(self):
self.functions = {}
def register(self, typeclass):
assert issubclass(typeclass, types.Type)
def decorator(func):
if typeclass in self.functions:
raise KeyError("duplicate registration for %s" % (typeclass,))
self.functions[typeclass] = func
return func
return decorator
def lookup(self, typeclass, default=None):
assert issubclass(typeclass, types.Type)
for cls in typeclass.__mro__:
func = self.functions.get(cls)
if func is not None:
return func
return default
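lookup() walks the type's MRO, so a handler registered for a base class also serves its subclasses. A plain-Python sketch of the same dispatch, using ordinary classes in place of numba types:

# MRO-based handler lookup, as _Registry.lookup does for types.Type.
class Base: pass
class Sub(Base): pass

table = {Base: "handler-for-Base"}

def lookup(cls, default=None):
    for c in cls.__mro__:          # walk subclass -> superclass
        if c in table:
            return table[c]
    return default

assert lookup(Sub) == "handler-for-Base"   # inherited handler
assert lookup(int, "fallback") == "fallback"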
# Registries of boxing / unboxing implementations
_boxers = _Registry()
_unboxers = _Registry()
_reflectors = _Registry()
box = _boxers.register
unbox = _unboxers.register
reflect = _reflectors.register
class _BoxContext(namedtuple("_BoxContext",
("context", "builder", "pyapi", "env_manager"))):
"""
The facilities required by boxing implementations.
"""
__slots__ = ()
def box(self, typ, val):
return self.pyapi.from_native_value(typ, val, self.env_manager)
class _UnboxContext(namedtuple("_UnboxContext",
("context", "builder", "pyapi"))):
"""
The facilities required by unboxing implementations.
"""
__slots__ = ()
def unbox(self, typ, obj):
return self.pyapi.to_native_value(typ, obj)
class _ReflectContext(namedtuple("_ReflectContext",
("context", "builder", "pyapi", "env_manager",
"is_error"))):
"""
The facilities required by reflection implementations.
"""
__slots__ = ()
# XXX the error bit is currently unused by consumers (e.g. PyCallWrapper)
def set_error(self):
self.builder.store(self.is_error, cgutils.true_bit)
def box(self, typ, val):
return self.pyapi.from_native_value(typ, val, self.env_manager)
def reflect(self, typ, val):
return self.pyapi.reflect_native_value(typ, val, self.env_manager)
class NativeValue(object):
"""
Encapsulate the result of converting a Python object to a native value,
recording whether the conversion was successful and how to cleanup.
"""
def __init__(self, value, is_error=None, cleanup=None):
self.value = value
self.is_error = is_error if is_error is not None else cgutils.false_bit
self.cleanup = cleanup
class EnvironmentManager(object):
def __init__(self, pyapi, env, env_body, env_ptr):
assert isinstance(env, lowering.Environment)
self.pyapi = pyapi
self.env = env
self.env_body = env_body
self.env_ptr = env_ptr
def add_const(self, const):
"""
Add a constant to the environment, return its index.
"""
# All constants are frozen inside the environment
if isinstance(const, str):
const = utils.intern(const)
for index, val in enumerate(self.env.consts):
if val is const:
break
else:
index = len(self.env.consts)
self.env.consts.append(const)
return index
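Note that add_const deduplicates by identity (`is`), not by equality, so equal but distinct objects occupy separate slots. A minimal sketch of that behavior:

# Identity-based interning, mirroring EnvironmentManager.add_const.
consts = []

def add_const(const):
    for index, val in enumerate(consts):
        if val is const:
            return index
    consts.append(const)
    return len(consts) - 1

x = float("2.5")
y = float("2.5")              # equal value, distinct object
assert add_const(x) == 0
assert add_const(x) == 0      # identity hit -> same slot
assert add_const(y) == 1      # equality alone is not enough -> new slot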
def read_const(self, index):
"""
Look up constant number *index* inside the environment body.
A borrowed reference is returned.
        The returned LLVM value may be NULL at runtime, which indicates an
        error.
"""
assert index < len(self.env.consts)
builder = self.pyapi.builder
consts = self.env_body.consts
ret = cgutils.alloca_once(builder, self.pyapi.pyobj, zfill=True)
with builder.if_else(cgutils.is_not_null(builder, consts)) as \
(br_not_null, br_null):
with br_not_null:
getitem = self.pyapi.list_getitem(consts, index)
builder.store(getitem, ret)
with br_null:
# This can happen when the Environment is accidentally released
# and has subsequently been garbage collected.
self.pyapi.err_set_string(
"PyExc_RuntimeError",
"`env.consts` is NULL in `read_const`",
)
return builder.load(ret)
_IteratorLoop = namedtuple('_IteratorLoop', ('value', 'do_break'))
class PythonAPI(object):
"""
Code generation facilities to call into the CPython C API (and related
helpers).
"""
def __init__(self, context, builder):
"""
        Note: May be called multiple times when lowering a function.
"""
from numba.core import boxing
self.context = context
self.builder = builder
self.module = builder.basic_block.function.module
# A unique mapping of serialized objects in this module
try:
self.module.__serialized
except AttributeError:
self.module.__serialized = {}
# Initialize types
self.pyobj = self.context.get_argument_type(types.pyobject)
self.pyobjptr = self.pyobj.as_pointer()
self.voidptr = Type.pointer(Type.int(8))
self.long = Type.int(ctypes.sizeof(ctypes.c_long) * 8)
self.ulong = self.long
self.longlong = Type.int(ctypes.sizeof(ctypes.c_ulonglong) * 8)
self.ulonglong = self.longlong
self.double = Type.double()
self.py_ssize_t = self.context.get_value_type(types.intp)
self.cstring = Type.pointer(Type.int(8))
self.gil_state = Type.int(_helperlib.py_gil_state_size * 8)
self.py_buffer_t = ir.ArrayType(ir.IntType(8), _helperlib.py_buffer_size)
self.py_hash_t = self.py_ssize_t
self.py_unicode_1byte_kind = _helperlib.py_unicode_1byte_kind
self.py_unicode_2byte_kind = _helperlib.py_unicode_2byte_kind
self.py_unicode_4byte_kind = _helperlib.py_unicode_4byte_kind
self.py_unicode_wchar_kind = _helperlib.py_unicode_wchar_kind
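A quick standalone check of the width arithmetic used above. LLVM integer types are sized in bits and carry no signedness, which is also why `ulong` simply aliases `long`:

# Width of C long/long long as LLVM integer types.
import ctypes
import llvmlite.ir as ir

long_ty = ir.IntType(ctypes.sizeof(ctypes.c_long) * 8)
longlong_ty = ir.IntType(ctypes.sizeof(ctypes.c_longlong) * 8)
print(long_ty, longlong_ty)   # e.g. i64 i64 on LP64 Unix, i32 i64 on Windows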
def get_env_manager(self, env, env_body, env_ptr):
return EnvironmentManager(self, env, env_body, env_ptr)
def emit_environment_sentry(self, envptr, return_pyobject=False,
debug_msg=''):
"""Emits LLVM code to ensure the `envptr` is not NULL
"""
is_null = cgutils.is_null(self.builder, envptr)
with cgutils.if_unlikely(self.builder, is_null):
if return_pyobject:
fnty = self.builder.function.type.pointee
assert fnty.return_type == self.pyobj
self.err_set_string(
"PyExc_RuntimeError", f"missing Environment: {debug_msg}",
)
self.builder.ret(self.get_null_object())
else:
self.context.call_conv.return_user_exc(
self.builder, RuntimeError,
(f"missing Environment: {debug_msg}",),
)
# ------ Python API -----
#
# Basic object API
#
def incref(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="Py_IncRef")
self.builder.call(fn, [obj])
def decref(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="Py_DecRef")
self.builder.call(fn, [obj])
def get_type(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="numba_py_type")
return self.builder.call(fn, [obj])
#
# Argument unpacking
#
def parse_tuple_and_keywords(self, args, kws, fmt, keywords, *objs):
charptr = Type.pointer(Type.int(8))
charptrary = Type.pointer(charptr)
argtypes = [self.pyobj, self.pyobj, charptr, charptrary]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_ParseTupleAndKeywords")
return self.builder.call(fn, [args, kws, fmt, keywords] + list(objs))
def parse_tuple(self, args, fmt, *objs):
charptr = Type.pointer(Type.int(8))
argtypes = [self.pyobj, charptr]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_ParseTuple")
return self.builder.call(fn, [args, fmt] + list(objs))
def unpack_tuple(self, args, name, n_min, n_max, *objs):
charptr = Type.pointer(Type.int(8))
argtypes = [self.pyobj, charptr, self.py_ssize_t, self.py_ssize_t]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_UnpackTuple")
n_min = Constant.int(self.py_ssize_t, n_min)
n_max = Constant.int(self.py_ssize_t, n_max)
if isinstance(name, str):
name = self.context.insert_const_string(self.builder.module, name)
return self.builder.call(fn, [args, name, n_min, n_max] + list(objs))
#
# Exception and errors
#
def err_occurred(self):
fnty = Type.function(self.pyobj, ())
fn = self._get_function(fnty, name="PyErr_Occurred")
return self.builder.call(fn, ())
def err_clear(self):
fnty = Type.function(Type.void(), ())
fn = self._get_function(fnty, name="PyErr_Clear")
return self.builder.call(fn, ())
def err_set_string(self, exctype, msg):
fnty = Type.function(Type.void(), [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyErr_SetString")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
if isinstance(msg, str):
msg = self.context.insert_const_string(self.module, msg)
return self.builder.call(fn, (exctype, msg))
def err_format(self, exctype, msg, *format_args):
fnty = Type.function(Type.void(), [self.pyobj, self.cstring], var_arg=True)
fn = self._get_function(fnty, name="PyErr_Format")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
if isinstance(msg, str):
msg = self.context.insert_const_string(self.module, msg)
return self.builder.call(fn, (exctype, msg) + tuple(format_args))
def raise_object(self, exc=None):
"""
        Raise an arbitrary exception: a type, a value, a (type, args) tuple,
        or None to re-raise. A reference to the argument is consumed.
"""
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="numba_do_raise")
if exc is None:
exc = self.make_none()
return self.builder.call(fn, (exc,))
def err_set_object(self, exctype, excval):
fnty = Type.function(Type.void(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyErr_SetObject")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
return self.builder.call(fn, (exctype, excval))
def err_set_none(self, exctype):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="PyErr_SetNone")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
return self.builder.call(fn, (exctype,))
def err_write_unraisable(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="PyErr_WriteUnraisable")
return self.builder.call(fn, (obj,))
def err_fetch(self, pty, pval, ptb):
fnty = Type.function(Type.void(), [self.pyobjptr] * 3)
fn = self._get_function(fnty, name="PyErr_Fetch")
return self.builder.call(fn, (pty, pval, ptb))
def err_restore(self, ty, val, tb):
fnty = Type.function(Type.void(), [self.pyobj] * 3)
fn = self._get_function(fnty, name="PyErr_Restore")
return self.builder.call(fn, (ty, val, tb))
@contextlib.contextmanager
def err_push(self, keep_new=False):
"""
Temporarily push the current error indicator while the code
block is executed. If *keep_new* is True and the code block
        raises a new error, the new error is kept; otherwise the old
error indicator is restored at the end of the block.
"""
pty, pval, ptb = [cgutils.alloca_once(self.builder, self.pyobj)
for i in range(3)]
self.err_fetch(pty, pval, ptb)
yield
ty = self.builder.load(pty)
val = self.builder.load(pval)
tb = self.builder.load(ptb)
if keep_new:
new_error = cgutils.is_not_null(self.builder, self.err_occurred())
with self.builder.if_else(new_error, likely=False) as (if_error, if_ok):
with if_error:
# Code block raised an error, keep it
self.decref(ty)
self.decref(val)
self.decref(tb)
with if_ok:
# Restore previous error
self.err_restore(ty, val, tb)
else:
self.err_restore(ty, val, tb)
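A pure-Python analogue (illustrative only) of err_push's save/branch/restore logic, with a plain variable standing in for CPython's thread-local error indicator:

import contextlib

error_indicator = None   # stand-in for the fetched (type, value, tb) triple

@contextlib.contextmanager
def err_push_sketch(keep_new=False):
    global error_indicator
    # PyErr_Fetch both reads and clears the indicator.
    saved, error_indicator = error_indicator, None
    yield
    if keep_new and error_indicator is not None:
        return                      # block raised a new error: keep it
    error_indicator = saved         # otherwise restore the previous error

error_indicator = "old-error"
with err_push_sketch():
    error_indicator = "new-error"   # the block "raises"
assert error_indicator == "old-error"   # restored, new error discarded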
def get_c_object(self, name):
"""
Get a Python object through its C-accessible *name*
(e.g. "PyExc_ValueError"). The underlying variable must be
a `PyObject *`, and the value of that pointer is returned.
"""
        # An LLVM global variable is implicitly a pointer to the declared
# type, so fix up by using pyobj.pointee.
return self.context.get_c_value(self.builder, self.pyobj.pointee, name,
dllimport=True)
def raise_missing_global_error(self, name):
msg = "global name '%s' is not defined" % name
cstr = self.context.insert_const_string(self.module, msg)
self.err_set_string("PyExc_NameError", cstr)
def raise_missing_name_error(self, name):
msg = "name '%s' is not defined" % name
cstr = self.context.insert_const_string(self.module, msg)
self.err_set_string("PyExc_NameError", cstr)
def fatal_error(self, msg):
fnty = Type.function(Type.void(), [self.cstring])
fn = self._get_function(fnty, name="Py_FatalError")
fn.attributes.add("noreturn")
cstr = self.context.insert_const_string(self.module, msg)
self.builder.call(fn, (cstr,))
#
# Concrete dict API
#
def dict_getitem_string(self, dic, name):
"""Lookup name inside dict
Returns a borrowed reference
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyDict_GetItemString")
cstr = self.context.insert_const_string(self.module, name)
return self.builder.call(fn, [dic, cstr])
def dict_getitem(self, dic, name):
"""Lookup name inside dict
Returns a borrowed reference
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyDict_GetItem")
return self.builder.call(fn, [dic, name])
def dict_new(self, presize=0):
if presize == 0:
fnty = Type.function(self.pyobj, ())
fn = self._get_function(fnty, name="PyDict_New")
return self.builder.call(fn, ())
else:
fnty = Type.function(self.pyobj, [self.py_ssize_t])
fn = self._get_function(fnty, name="_PyDict_NewPresized")
return self.builder.call(fn,
[Constant.int(self.py_ssize_t, presize)])
def dict_setitem(self, dictobj, nameobj, valobj):
fnty = Type.function(Type.int(), (self.pyobj, self.pyobj,
self.pyobj))
fn = self._get_function(fnty, name="PyDict_SetItem")
return self.builder.call(fn, (dictobj, nameobj, valobj))
def dict_setitem_string(self, dictobj, name, valobj):
fnty = Type.function(Type.int(), (self.pyobj, self.cstring,
self.pyobj))
fn = self._get_function(fnty, name="PyDict_SetItemString")
cstr = self.context.insert_const_string(self.module, name)
return self.builder.call(fn, (dictobj, cstr, valobj))
def dict_pack(self, keyvalues):
"""
Args
-----
keyvalues: iterable of (str, llvm.Value of PyObject*)
"""
dictobj = self.dict_new()
with self.if_object_ok(dictobj):
for k, v in keyvalues:
self.dict_setitem_string(dictobj, k, v)
return dictobj
#
# Concrete number APIs
#
def float_from_double(self, fval):
fnty = Type.function(self.pyobj, [self.double])
fn = self._get_function(fnty, name="PyFloat_FromDouble")
return self.builder.call(fn, [fval])
def number_as_ssize_t(self, numobj):
fnty = Type.function(self.py_ssize_t, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyNumber_AsSsize_t")
# We don't want any clipping, so pass OverflowError as the 2nd arg
exc_class = self.get_c_object("PyExc_OverflowError")
return self.builder.call(fn, [numobj, exc_class])
def number_long(self, numobj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Long")
return self.builder.call(fn, [numobj])
def long_as_ulonglong(self, numobj):
fnty = Type.function(self.ulonglong, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsUnsignedLongLong")
return self.builder.call(fn, [numobj])
def long_as_longlong(self, numobj):
fnty = Type.function(self.ulonglong, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsLongLong")
return self.builder.call(fn, [numobj])
def long_as_voidptr(self, numobj):
"""
Convert the given Python integer to a void*. This is recommended
over number_as_ssize_t as it isn't affected by signedness.
"""
fnty = Type.function(self.voidptr, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsVoidPtr")
return self.builder.call(fn, [numobj])
def _long_from_native_int(self, ival, func_name, native_int_type,
signed):
fnty = Type.function(self.pyobj, [native_int_type])
        fn = self._get_function(fnty, name=func_name)
        resptr = cgutils.alloca_once(self.builder, self.pyobj)
        self.builder.store(self.builder.call(fn, [ival]), resptr)
return self.builder.load(resptr)
def long_from_long(self, ival):
func_name = "PyLong_FromLong"
fnty = Type.function(self.pyobj, [self.long])
fn = self._get_function(fnty, name=func_name)
return self.builder.call(fn, [ival])
def long_from_ulong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromUnsignedLong",
self.long, signed=False)
def long_from_ssize_t(self, ival):
return self._long_from_native_int(ival, "PyLong_FromSsize_t",
self.py_ssize_t, signed=True)
def long_from_longlong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromLongLong",
self.longlong, signed=True)
def long_from_ulonglong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromUnsignedLongLong",
self.ulonglong, signed=False)
def long_from_signed_int(self, ival):
"""
Return a Python integer from any native integer value.
"""
bits = ival.type.width
if bits <= self.long.width:
return self.long_from_long(self.builder.sext(ival, self.long))
elif bits <= self.longlong.width:
return self.long_from_longlong(self.builder.sext(ival, self.longlong))
else:
raise OverflowError("integer too big (%d bits)" % (bits))
def long_from_unsigned_int(self, ival):
"""
Same as long_from_signed_int, but for unsigned values.
"""
bits = ival.type.width
if bits <= self.ulong.width:
return self.long_from_ulong(self.builder.zext(ival, self.ulong))
elif bits <= self.ulonglong.width:
return self.long_from_ulonglong(self.builder.zext(ival, self.ulonglong))
else:
raise OverflowError("integer too big (%d bits)" % (bits))
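Both conversion helpers pick the narrowest CPython constructor whose C type can hold the value's bit-width, extending the operand first (sext for signed, zext for unsigned). A sketch of that dispatch; the constructor names are the real CPython APIs, the helper itself is illustrative:

import ctypes

LONG_BITS = ctypes.sizeof(ctypes.c_long) * 8
LONGLONG_BITS = ctypes.sizeof(ctypes.c_longlong) * 8

def pick_ctor(bits, signed=True):
    if bits <= LONG_BITS:
        return "PyLong_FromLong" if signed else "PyLong_FromUnsignedLong"
    elif bits <= LONGLONG_BITS:
        return ("PyLong_FromLongLong" if signed
                else "PyLong_FromUnsignedLongLong")
    raise OverflowError("integer too big (%d bits)" % bits)

assert pick_ctor(8) == "PyLong_FromLong"   # fits in C long on any platform
print(pick_ctor(64), pick_ctor(64, signed=False))   # platform-dependent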
def _get_number_operator(self, name):
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyNumber_%s" % name)
return fn
def _call_number_operator(self, name, lhs, rhs, inplace=False):
if inplace:
name = "InPlace" + name
fn = self._get_number_operator(name)
return self.builder.call(fn, [lhs, rhs])
def number_add(self, lhs, rhs, inplace=False):
return self._call_number_operator("Add", lhs, rhs, inplace=inplace)
def number_subtract(self, lhs, rhs, inplace=False):
return self._call_number_operator("Subtract", lhs, rhs, inplace=inplace)
def number_multiply(self, lhs, rhs, inplace=False):
return self._call_number_operator("Multiply", lhs, rhs, inplace=inplace)
def number_truedivide(self, lhs, rhs, inplace=False):
return self._call_number_operator("TrueDivide", lhs, rhs, inplace=inplace)
def number_floordivide(self, lhs, rhs, inplace=False):
return self._call_number_operator("FloorDivide", lhs, rhs, inplace=inplace)
def number_remainder(self, lhs, rhs, inplace=False):
return self._call_number_operator("Remainder", lhs, rhs, inplace=inplace)
def number_matrix_multiply(self, lhs, rhs, inplace=False):
return self._call_number_operator("MatrixMultiply", lhs, rhs, inplace=inplace)
def number_lshift(self, lhs, rhs, inplace=False):
return self._call_number_operator("Lshift", lhs, rhs, inplace=inplace)
def number_rshift(self, lhs, rhs, inplace=False):
return self._call_number_operator("Rshift", lhs, rhs, inplace=inplace)
def number_and(self, lhs, rhs, inplace=False):
return self._call_number_operator("And", lhs, rhs, inplace=inplace)
def number_or(self, lhs, rhs, inplace=False):
return self._call_number_operator("Or", lhs, rhs, inplace=inplace)
def number_xor(self, lhs, rhs, inplace=False):
return self._call_number_operator("Xor", lhs, rhs, inplace=inplace)
def number_power(self, lhs, rhs, inplace=False):
fnty = Type.function(self.pyobj, [self.pyobj] * 3)
fname = "PyNumber_InPlacePower" if inplace else "PyNumber_Power"
fn = self._get_function(fnty, fname)
return self.builder.call(fn, [lhs, rhs, self.borrow_none()])
def number_negative(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Negative")
return self.builder.call(fn, (obj,))
def number_positive(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Positive")
return self.builder.call(fn, (obj,))
def number_float(self, val):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Float")
return self.builder.call(fn, [val])
def number_invert(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Invert")
return self.builder.call(fn, (obj,))
def float_as_double(self, fobj):
fnty = Type.function(self.double, [self.pyobj])
fn = self._get_function(fnty, name="PyFloat_AsDouble")
return self.builder.call(fn, [fobj])
def bool_from_bool(self, bval):
"""
        Get a Python bool from an LLVM boolean.
"""
longval = self.builder.zext(bval, self.long)
return self.bool_from_long(longval)
def bool_from_long(self, ival):
fnty = Type.function(self.pyobj, [self.long])
fn = self._get_function(fnty, name="PyBool_FromLong")
return self.builder.call(fn, [ival])
def complex_from_doubles(self, realval, imagval):
fnty = Type.function(self.pyobj, [Type.double(), Type.double()])
fn = self._get_function(fnty, name="PyComplex_FromDoubles")
return self.builder.call(fn, [realval, imagval])
def complex_real_as_double(self, cobj):
fnty = Type.function(Type.double(), [self.pyobj])
fn = self._get_function(fnty, name="PyComplex_RealAsDouble")
return self.builder.call(fn, [cobj])
def complex_imag_as_double(self, cobj):
fnty = Type.function(Type.double(), [self.pyobj])
fn = self._get_function(fnty, name="PyComplex_ImagAsDouble")
return self.builder.call(fn, [cobj])
#
# Concrete slice API
#
def slice_as_ints(self, obj):
"""
Read the members of a slice of integers.
Returns a (ok, start, stop, step) tuple where ok is a boolean and
the following members are pointer-sized ints.
"""
pstart = cgutils.alloca_once(self.builder, self.py_ssize_t)
pstop = cgutils.alloca_once(self.builder, self.py_ssize_t)
pstep = cgutils.alloca_once(self.builder, self.py_ssize_t)
fnty = Type.function(Type.int(),
[self.pyobj] + [self.py_ssize_t.as_pointer()] * 3)
fn = self._get_function(fnty, name="numba_unpack_slice")
res = self.builder.call(fn, (obj, pstart, pstop, pstep))
start = self.builder.load(pstart)
stop = self.builder.load(pstop)
step = self.builder.load(pstep)
return cgutils.is_null(self.builder, res), start, stop, step
#
# List and sequence APIs
#
def sequence_getslice(self, obj, start, stop):
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t,
self.py_ssize_t])
fn = self._get_function(fnty, name="PySequence_GetSlice")
return self.builder.call(fn, (obj, start, stop))
def sequence_tuple(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PySequence_Tuple")
return self.builder.call(fn, [obj])
def list_new(self, szval):
fnty = Type.function(self.pyobj, [self.py_ssize_t])
fn = self._get_function(fnty, name="PyList_New")
return self.builder.call(fn, [szval])
def list_size(self, lst):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PyList_Size")
return self.builder.call(fn, [lst])
def list_append(self, lst, val):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyList_Append")
return self.builder.call(fn, [lst, val])
def list_setitem(self, lst, idx, val):
"""
Warning: Steals reference to ``val``
"""
fnty = Type.function(Type.int(), [self.pyobj, self.py_ssize_t,
self.pyobj])
fn = self._get_function(fnty, name="PyList_SetItem")
return self.builder.call(fn, [lst, idx, val])
def list_getitem(self, lst, idx):
"""
Returns a borrowed reference.
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t])
fn = self._get_function(fnty, name="PyList_GetItem")
if isinstance(idx, int):
idx = self.context.get_constant(types.intp, idx)
return self.builder.call(fn, [lst, idx])
def list_setslice(self, lst, start, stop, obj):
if obj is None:
obj = self.get_null_object()
fnty = Type.function(Type.int(), [self.pyobj, self.py_ssize_t,
self.py_ssize_t, self.pyobj])
fn = self._get_function(fnty, name="PyList_SetSlice")
return self.builder.call(fn, (lst, start, stop, obj))
#
# Concrete tuple API
#
def tuple_getitem(self, tup, idx):
"""
        Returns a borrowed reference.
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t])
fn = self._get_function(fnty, name="PyTuple_GetItem")
idx = self.context.get_constant(types.intp, idx)
return self.builder.call(fn, [tup, idx])
def tuple_pack(self, items):
fnty = Type.function(self.pyobj, [self.py_ssize_t], var_arg=True)
fn = self._get_function(fnty, name="PyTuple_Pack")
n = self.context.get_constant(types.intp, len(items))
args = [n]
args.extend(items)
return self.builder.call(fn, args)
def tuple_size(self, tup):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PyTuple_Size")
return self.builder.call(fn, [tup])
def tuple_new(self, count):
fnty = Type.function(self.pyobj, [Type.int()])
fn = self._get_function(fnty, name='PyTuple_New')
return self.builder.call(fn, [self.context.get_constant(types.int32,
count)])
def tuple_setitem(self, tuple_val, index, item):
"""
Steals a reference to `item`.
"""
fnty = Type.function(Type.int(), [self.pyobj, Type.int(), self.pyobj])
setitem_fn = self._get_function(fnty, name='PyTuple_SetItem')
index = self.context.get_constant(types.int32, index)
self.builder.call(setitem_fn, [tuple_val, index, item])
#
# Concrete set API
#
def set_new(self, iterable=None):
if iterable is None:
iterable = self.get_null_object()
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PySet_New")
return self.builder.call(fn, [iterable])
def set_add(self, set, value):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PySet_Add")
return self.builder.call(fn, [set, value])
def set_clear(self, set):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PySet_Clear")
return self.builder.call(fn, [set])
def set_size(self, set):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PySet_Size")
return self.builder.call(fn, [set])
def set_update(self, set, iterable):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="_PySet_Update")
return self.builder.call(fn, [set, iterable])
def set_next_entry(self, set, posptr, keyptr, hashptr):
fnty = Type.function(Type.int(),
[self.pyobj, self.py_ssize_t.as_pointer(),
self.pyobj.as_pointer(), self.py_hash_t.as_pointer()])
fn = self._get_function(fnty, name="_PySet_NextEntry")
return self.builder.call(fn, (set, posptr, keyptr, hashptr))
@contextlib.contextmanager
def set_iterate(self, set):
builder = self.builder
hashptr = cgutils.alloca_once(builder, self.py_hash_t, name="hashptr")
keyptr = cgutils.alloca_once(builder, self.pyobj, name="keyptr")
posptr = cgutils.alloca_once_value(builder,
ir.Constant(self.py_ssize_t, 0),
name="posptr")
bb_body = builder.append_basic_block("bb_body")
bb_end = builder.append_basic_block("bb_end")
builder.branch(bb_body)
def do_break():
builder.branch(bb_end)
with builder.goto_block(bb_body):
r = self.set_next_entry(set, posptr, keyptr, hashptr)
finished = cgutils.is_null(builder, r)
with builder.if_then(finished, likely=False):
builder.branch(bb_end)
yield _IteratorLoop(builder.load(keyptr), do_break)
builder.branch(bb_body)
builder.position_at_end(bb_end)
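The control flow set_iterate builds (a self-re-entering body block plus an exit block reached on exhaustion or via do_break) can be reproduced standalone. A hedged llvmlite sketch with an illustrative next_entry stand-in:

import llvmlite.ir as ir

i32 = ir.IntType(32)
mod = ir.Module(name="sketch")
next_entry = ir.Function(mod, ir.FunctionType(i32, []), name="next_entry")

fn = ir.Function(mod, ir.FunctionType(ir.VoidType(), []), name="demo")
builder = ir.IRBuilder(fn.append_basic_block("entry"))
bb_body = fn.append_basic_block("bb_body")
bb_end = fn.append_basic_block("bb_end")
builder.branch(bb_body)

builder.position_at_end(bb_body)
r = builder.call(next_entry, [])
finished = builder.icmp_unsigned("==", r, ir.Constant(i32, 0))
with builder.if_then(finished, likely=False):
    builder.branch(bb_end)          # exhausted: leave the loop
# ... per-item work would be emitted here ...
builder.branch(bb_body)             # loop back for the next entry
builder.position_at_end(bb_end)
builder.ret_void()
print(mod)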
#
# GIL APIs
#
def gil_ensure(self):
"""
Ensure the GIL is acquired.
The returned value must be consumed by gil_release().
"""
gilptrty = Type.pointer(self.gil_state)
fnty = Type.function(Type.void(), [gilptrty])
fn = self._get_function(fnty, "numba_gil_ensure")
gilptr = cgutils.alloca_once(self.builder, self.gil_state)
self.builder.call(fn, [gilptr])
return gilptr
def gil_release(self, gil):
"""
        Release the GIL previously acquired by gil_ensure().
        Must be paired with a gil_ensure() call.
"""
gilptrty = Type.pointer(self.gil_state)
fnty = Type.function(Type.void(), [gilptrty])
fn = self._get_function(fnty, "numba_gil_release")
return self.builder.call(fn, [gil])
def save_thread(self):
"""
Release the GIL and return the former thread state
(an opaque non-NULL pointer).
"""
fnty = Type.function(self.voidptr, [])
fn = self._get_function(fnty, name="PyEval_SaveThread")
return self.builder.call(fn, [])
def restore_thread(self, thread_state):
"""
Restore the given thread state by reacquiring the GIL.
"""
fnty = Type.function(Type.void(), [self.voidptr])
fn = self._get_function(fnty, name="PyEval_RestoreThread")
self.builder.call(fn, [thread_state])
#
# Generic object private data (a way of associating an arbitrary void *
# pointer to an arbitrary Python object).
#
def object_get_private_data(self, obj):
fnty = Type.function(self.voidptr, [self.pyobj])
fn = self._get_function(fnty, name="numba_get_pyobject_private_data")
return self.builder.call(fn, (obj,))
def object_set_private_data(self, obj, ptr):
fnty = Type.function(Type.void(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="numba_set_pyobject_private_data")
return self.builder.call(fn, (obj, ptr))
def object_reset_private_data(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="numba_reset_pyobject_private_data")
return self.builder.call(fn, (obj,))
#
# Other APIs (organize them better!)
#
def import_module_noblock(self, modname):
fnty = Type.function(self.pyobj, [self.cstring])
fn = self._get_function(fnty, name="PyImport_ImportModuleNoBlock")
return self.builder.call(fn, [modname])
def call_function_objargs(self, callee, objargs):
fnty = Type.function(self.pyobj, [self.pyobj], var_arg=True)
fn = self._get_function(fnty, name="PyObject_CallFunctionObjArgs")
args = [callee] + list(objargs)
args.append(self.context.get_constant_null(types.pyobject))
return self.builder.call(fn, args)
def call_method(self, callee, method, objargs=()):
cname = self.context.insert_const_string(self.module, method)
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring, self.cstring],
var_arg=True)
fn = self._get_function(fnty, name="PyObject_CallMethod")
fmt = 'O' * len(objargs)
cfmt = self.context.insert_const_string(self.module, fmt)
args = [callee, cname, cfmt]
if objargs:
args.extend(objargs)
args.append(self.context.get_constant_null(types.pyobject))
return self.builder.call(fn, args)
def call(self, callee, args=None, kws=None):
if args is None:
args = self.get_null_object()
if kws is None:
kws = self.get_null_object()
fnty = Type.function(self.pyobj, [self.pyobj] * 3)
fn = self._get_function(fnty, name="PyObject_Call")
return self.builder.call(fn, (callee, args, kws))
def object_istrue(self, obj):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PyObject_IsTrue")
return self.builder.call(fn, [obj])
def object_not(self, obj):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PyObject_Not")
return self.builder.call(fn, [obj])
def object_richcompare(self, lhs, rhs, opstr):
"""
        Refer to the CPython source, Include/object.h, for the macro
        definitions of the opid.
"""
ops = ['<', '<=', '==', '!=', '>', '>=']
if opstr in ops:
opid = ops.index(opstr)
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj, Type.int()])
fn = self._get_function(fnty, name="PyObject_RichCompare")
lopid = self.context.get_constant(types.int32, opid)
return self.builder.call(fn, (lhs, rhs, lopid))
elif opstr == 'is':
bitflag = self.builder.icmp(lc.ICMP_EQ, lhs, rhs)
return self.bool_from_bool(bitflag)
elif opstr == 'is not':
bitflag = self.builder.icmp(lc.ICMP_NE, lhs, rhs)
return self.bool_from_bool(bitflag)
elif opstr in ('in', 'not in'):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PySequence_Contains")
status = self.builder.call(fn, (rhs, lhs))
negone = self.context.get_constant(types.int32, -1)
is_good = self.builder.icmp(lc.ICMP_NE, status, negone)
# Stack allocate output and initialize to Null
outptr = cgutils.alloca_once_value(self.builder,
Constant.null(self.pyobj))
# If PySequence_Contains returns non-error value
with cgutils.if_likely(self.builder, is_good):
if opstr == 'not in':
status = self.builder.not_(status)
# Store the status as a boolean object
truncated = self.builder.trunc(status, Type.int(1))
self.builder.store(self.bool_from_bool(truncated),
outptr)
return self.builder.load(outptr)
else:
raise NotImplementedError("Unknown operator {op!r}".format(
op=opstr))
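The opid passed to PyObject_RichCompare is simply the operator's index in the `ops` list, matching CPython's Py_LT..Py_GE constants:

# Index <-> opid mapping used by object_richcompare.
ops = ['<', '<=', '==', '!=', '>', '>=']
assert ops.index('<') == 0    # Py_LT
assert ops.index('==') == 2   # Py_EQ
assert ops.index('>=') == 5   # Py_GE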
def iter_next(self, iterobj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyIter_Next")
return self.builder.call(fn, [iterobj])
def object_getiter(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetIter")
return self.builder.call(fn, [obj])
def object_getattr_string(self, obj, attr):
cstr = self.context.insert_const_string(self.module, attr)
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyObject_GetAttrString")
return self.builder.call(fn, [obj, cstr])
def object_getattr(self, obj, attr):
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetAttr")
return self.builder.call(fn, [obj, attr])
def object_setattr_string(self, obj, attr, val):
cstr = self.context.insert_const_string(self.module, attr)
fnty = Type.function(Type.int(), [self.pyobj, self.cstring, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetAttrString")
return self.builder.call(fn, [obj, cstr, val])
def object_setattr(self, obj, attr, val):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetAttr")
return self.builder.call(fn, [obj, attr, val])
def object_delattr_string(self, obj, attr):
# PyObject_DelAttrString() is actually a C macro calling
# PyObject_SetAttrString() with value == NULL.
return self.object_setattr_string(obj, attr, self.get_null_object())
def object_delattr(self, obj, attr):
# PyObject_DelAttr() is actually a C macro calling
# PyObject_SetAttr() with value == NULL.
return self.object_setattr(obj, attr, self.get_null_object())
def object_getitem(self, obj, key):
"""
Return obj[key]
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetItem")
return self.builder.call(fn, (obj, key))
def object_setitem(self, obj, key, val):
"""
obj[key] = val
"""
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetItem")
return self.builder.call(fn, (obj, key, val))
def object_delitem(self, obj, key):
"""
del obj[key]
"""
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_DelItem")
return self.builder.call(fn, (obj, key))
def string_as_string(self, strobj):
fnty = Type.function(self.cstring, [self.pyobj])
fname = "PyUnicode_AsUTF8"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [strobj])
def string_as_string_and_size(self, strobj):
"""
        Returns a tuple of ``(ok, buffer, length)``.
        ``ok`` is an i1 value that is set on success.
        ``buffer`` is an i8* pointing to the output buffer.
        ``length`` is an i32/i64 (py_ssize_t) holding the buffer length.
"""
p_length = cgutils.alloca_once(self.builder, self.py_ssize_t)
fnty = Type.function(self.cstring, [self.pyobj,
self.py_ssize_t.as_pointer()])
fname = "PyUnicode_AsUTF8AndSize"
fn = self._get_function(fnty, name=fname)
buffer = self.builder.call(fn, [strobj, p_length])
ok = self.builder.icmp_unsigned('!=',
ir.Constant(buffer.type, None),
buffer)
return (ok, buffer, self.builder.load(p_length))
def string_as_string_size_and_kind(self, strobj):
"""
        Returns a tuple of ``(ok, buffer, length, kind, is_ascii, hash)``.
        ``ok`` is an i1 value that is set on success.
        ``buffer`` is an i8* pointing to the output buffer.
        ``length`` is an i32/i64 (py_ssize_t) holding the buffer length.
        ``kind`` is an i32 (int32) holding the Unicode kind constant.
        ``is_ascii`` is an i32 (int32) indicating whether the string is ASCII.
        ``hash`` is a py_hash_t holding the precomputed hash of the string.
"""
p_length = cgutils.alloca_once(self.builder, self.py_ssize_t)
p_kind = cgutils.alloca_once(self.builder, Type.int())
p_ascii = cgutils.alloca_once(self.builder, Type.int())
p_hash = cgutils.alloca_once(self.builder, self.py_hash_t)
fnty = Type.function(self.cstring, [self.pyobj,
self.py_ssize_t.as_pointer(),
Type.int().as_pointer(),
Type.int().as_pointer(),
self.py_hash_t.as_pointer()])
fname = "numba_extract_unicode"
fn = self._get_function(fnty, name=fname)
buffer = self.builder.call(
fn, [strobj, p_length, p_kind, p_ascii, p_hash])
ok = self.builder.icmp_unsigned('!=',
ir.Constant(buffer.type, None),
buffer)
return (ok, buffer, self.builder.load(p_length),
self.builder.load(p_kind), self.builder.load(p_ascii),
self.builder.load(p_hash))
def string_from_string_and_size(self, string, size):
fnty = Type.function(self.pyobj, [self.cstring, self.py_ssize_t])
fname = "PyString_FromStringAndSize"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string, size])
def string_from_string(self, string):
fnty = Type.function(self.pyobj, [self.cstring])
fname = "PyUnicode_FromString"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string])
def string_from_kind_and_data(self, kind, string, size):
fnty = Type.function(self.pyobj, [Type.int(), self.cstring, self.py_ssize_t])
fname = "PyUnicode_FromKindAndData"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [kind, string, size])
def bytes_from_string_and_size(self, string, size):
fnty = Type.function(self.pyobj, [self.cstring, self.py_ssize_t])
fname = "PyBytes_FromStringAndSize"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string, size])
def object_hash(self, obj):
fnty = Type.function(self.py_hash_t, [self.pyobj,])
fname = "PyObject_Hash"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [obj,])
def object_str(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyObject_Str")
return self.builder.call(fn, [obj])
def make_none(self):
obj = self.borrow_none()
self.incref(obj)
return obj
def borrow_none(self):
return self.get_c_object("_Py_NoneStruct")
def sys_write_stdout(self, fmt, *args):
fnty = Type.function(Type.void(), [self.cstring], var_arg=True)
fn = self._get_function(fnty, name="PySys_FormatStdout")
return self.builder.call(fn, (fmt,) + args)
def object_dump(self, obj):
"""
Dump a Python object on C stderr. For debugging purposes.
"""
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="_PyObject_Dump")
return self.builder.call(fn, (obj,))
#
# NRT (Numba runtime) APIs
#
def nrt_adapt_ndarray_to_python(self, aryty, ary, dtypeptr):
assert self.context.enable_nrt, "NRT required"
intty = ir.IntType(32)
fnty = Type.function(self.pyobj,
[self.voidptr, intty, intty, self.pyobj])
fn = self._get_function(fnty, name="NRT_adapt_ndarray_to_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
ndim = self.context.get_constant(types.int32, aryty.ndim)
writable = self.context.get_constant(types.int32, int(aryty.mutable))
aryptr = cgutils.alloca_once_value(self.builder, ary)
return self.builder.call(fn, [self.builder.bitcast(aryptr,
self.voidptr),
ndim, writable, dtypeptr])
def nrt_meminfo_new_from_pyobject(self, data, pyobj):
"""
Allocate a new MemInfo with data payload borrowed from a python
object.
"""
mod = self.builder.module
fnty = ir.FunctionType(
cgutils.voidptr_t,
[cgutils.voidptr_t, cgutils.voidptr_t],
)
fn = mod.get_or_insert_function(
fnty,
name="NRT_meminfo_new_from_pyobject",
)
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [data, pyobj])
def nrt_meminfo_as_pyobject(self, miptr):
mod = self.builder.module
fnty = ir.FunctionType(
self.pyobj,
[cgutils.voidptr_t]
)
fn = mod.get_or_insert_function(
fnty,
name='NRT_meminfo_as_pyobject',
)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [miptr])
def nrt_meminfo_from_pyobject(self, miobj):
mod = self.builder.module
fnty = ir.FunctionType(
cgutils.voidptr_t,
[self.pyobj]
)
fn = mod.get_or_insert_function(
fnty,
name='NRT_meminfo_from_pyobject',
)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [miobj])
def nrt_adapt_ndarray_from_python(self, ary, ptr):
assert self.context.enable_nrt
fnty = Type.function(Type.int(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="NRT_adapt_ndarray_from_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (ary, ptr))
def nrt_adapt_buffer_from_python(self, buf, ptr):
assert self.context.enable_nrt
fnty = Type.function(Type.void(), [Type.pointer(self.py_buffer_t),
self.voidptr])
fn = self._get_function(fnty, name="NRT_adapt_buffer_from_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (buf, ptr))
# ------ utils -----
def _get_function(self, fnty, name):
return self.module.get_or_insert_function(fnty, name=name)
def alloca_obj(self):
return self.builder.alloca(self.pyobj)
def alloca_buffer(self):
"""
Return a pointer to a stack-allocated, zero-initialized Py_buffer.
"""
# Treat the buffer as an opaque array of bytes
ptr = cgutils.alloca_once_value(self.builder,
lc.Constant.null(self.py_buffer_t))
return ptr
@contextlib.contextmanager
def if_object_ok(self, obj):
with cgutils.if_likely(self.builder,
cgutils.is_not_null(self.builder, obj)):
yield
def print_object(self, obj):
strobj = self.object_str(obj)
cstr = self.string_as_string(strobj)
fmt = self.context.insert_const_string(self.module, "%s")
self.sys_write_stdout(fmt, cstr)
self.decref(strobj)
def print_string(self, text):
fmt = self.context.insert_const_string(self.module, text)
self.sys_write_stdout(fmt)
def get_null_object(self):
return Constant.null(self.pyobj)
def return_none(self):
none = self.make_none()
self.builder.ret(none)
def list_pack(self, items):
n = len(items)
seq = self.list_new(self.context.get_constant(types.intp, n))
with self.if_object_ok(seq):
for i in range(n):
idx = self.context.get_constant(types.intp, i)
self.incref(items[i])
self.list_setitem(seq, idx, items[i])
return seq
def unserialize(self, structptr):
"""
        Unserialize some data. *structptr* should be a pointer to
        a {i8* data, i32 length, i8* hashbuf} structure, as produced
        by serialize_object().
"""
fnty = Type.function(self.pyobj,
(self.voidptr, ir.IntType(32), self.voidptr))
fn = self._get_function(fnty, name="numba_unpickle")
ptr = self.builder.extract_value(self.builder.load(structptr), 0)
n = self.builder.extract_value(self.builder.load(structptr), 1)
hashed = self.builder.extract_value(self.builder.load(structptr), 2)
return self.builder.call(fn, (ptr, n, hashed))
def serialize_uncached(self, obj):
"""
Same as serialize_object(), but don't create a global variable,
simply return a literal {i8* data, i32 length, i8* hashbuf} structure.
"""
# First make the array constant
data = serialize.dumps(obj)
assert len(data) < 2**31
name = ".const.pickledata.%s" % (id(obj) if config.DIFF_IR == 0 else "DIFF_IR")
bdata = cgutils.make_bytearray(data)
# Make SHA1 hash on the pickled content
# NOTE: update buffer size in numba_unpickle() when changing the
# hash algorithm.
hashed = cgutils.make_bytearray(hashlib.sha1(data).digest())
arr = self.context.insert_unique_const(self.module, name, bdata)
hasharr = self.context.insert_unique_const(
self.module, f"{name}.sha1", hashed,
)
# Then populate the structure constant
struct = ir.Constant.literal_struct([
arr.bitcast(self.voidptr),
ir.Constant(ir.IntType(32), arr.type.pointee.count),
hasharr.bitcast(self.voidptr),
])
return struct
def serialize_object(self, obj):
"""
        Serialize the given object in the bitcode, and return it
        as a pointer to a {i8* data, i32 length, i8* hashbuf} structure
        constant (suitable for passing to unserialize()).
"""
try:
gv = self.module.__serialized[obj]
except KeyError:
struct = self.serialize_uncached(obj)
name = ".const.picklebuf.%s" % (id(obj) if config.DIFF_IR == 0 else "DIFF_IR")
gv = self.context.insert_unique_const(self.module, name, struct)
# Make the id() (and hence the name) unique while populating the module.
self.module.__serialized[obj] = gv
return gv
def c_api_error(self):
return cgutils.is_not_null(self.builder, self.err_occurred())
def to_native_value(self, typ, obj):
"""
Unbox the Python object as the given Numba type.
A NativeValue instance is returned.
"""
from numba.core.boxing import unbox_unsupported
impl = _unboxers.lookup(typ.__class__, unbox_unsupported)
c = _UnboxContext(self.context, self.builder, self)
return impl(typ, obj, c)
def from_native_return(self, typ, val, env_manager):
assert not isinstance(typ, types.Optional), "callconv should have " \
"prevented the return of " \
"optional value"
out = self.from_native_value(typ, val, env_manager)
return out
# MASKED: from_native_value function (lines 1391-1402)
def reflect_native_value(self, typ, val, env_manager=None):
"""
Reflect the native value onto its Python original, if any.
An error bit (as an LLVM value) is returned.
"""
impl = _reflectors.lookup(typ.__class__)
if impl is None:
# Reflection isn't needed for most types
return cgutils.false_bit
is_error = cgutils.alloca_once_value(self.builder, cgutils.false_bit)
c = _ReflectContext(self.context, self.builder, self, env_manager,
is_error)
impl(typ, val, c)
return self.builder.load(c.is_error)
def to_native_generator(self, obj, typ):
"""
Extract the generator structure pointer from a generator *obj*
(a _dynfunc.Generator instance).
"""
gen_ptr_ty = Type.pointer(self.context.get_data_type(typ))
value = self.context.get_generator_state(self.builder, obj, gen_ptr_ty)
return NativeValue(value)
def from_native_generator(self, val, typ, env=None):
"""
Make a Numba generator (a _dynfunc.Generator instance) from a
generator structure pointer *val*.
*env* is an optional _dynfunc.Environment instance to be wrapped
in the generator.
"""
llty = self.context.get_data_type(typ)
assert not llty.is_pointer
gen_struct_size = self.context.get_abi_sizeof(llty)
gendesc = self.context.get_generator_desc(typ)
# This is the PyCFunctionWithKeywords generated by PyCallWrapper
genfnty = Type.function(self.pyobj, [self.pyobj, self.pyobj, self.pyobj])
genfn = self._get_function(genfnty, name=gendesc.llvm_cpython_wrapper_name)
# This is the raw finalizer generated by _lower_generator_finalize_func()
finalizerty = Type.function(Type.void(), [self.voidptr])
if typ.has_finalizer:
finalizer = self._get_function(finalizerty, name=gendesc.llvm_finalizer_name)
else:
finalizer = Constant.null(Type.pointer(finalizerty))
# PyObject *numba_make_generator(state_size, initial_state, nextfunc, finalizer, env)
fnty = Type.function(self.pyobj, [self.py_ssize_t,
self.voidptr,
Type.pointer(genfnty),
Type.pointer(finalizerty),
self.voidptr])
fn = self._get_function(fnty, name="numba_make_generator")
state_size = ir.Constant(self.py_ssize_t, gen_struct_size)
initial_state = self.builder.bitcast(val, self.voidptr)
if env is None:
env = self.get_null_object()
env = self.builder.bitcast(env, self.voidptr)
return self.builder.call(fn,
(state_size, initial_state, genfn, finalizer, env))
def numba_array_adaptor(self, ary, ptr):
assert not self.context.enable_nrt
fnty = Type.function(Type.int(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="numba_adapt_ndarray")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (ary, ptr))
def numba_buffer_adaptor(self, buf, ptr):
fnty = Type.function(Type.void(),
[ir.PointerType(self.py_buffer_t), self.voidptr])
fn = self._get_function(fnty, name="numba_adapt_buffer")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (buf, ptr))
def complex_adaptor(self, cobj, cmplx):
fnty = Type.function(Type.int(), [self.pyobj, cmplx.type])
fn = self._get_function(fnty, name="numba_complex_adaptor")
return self.builder.call(fn, [cobj, cmplx])
def extract_record_data(self, obj, pbuf):
fnty = Type.function(self.voidptr,
[self.pyobj, ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_extract_record_data")
return self.builder.call(fn, [obj, pbuf])
def get_buffer(self, obj, pbuf):
fnty = Type.function(Type.int(),
[self.pyobj, ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_get_buffer")
return self.builder.call(fn, [obj, pbuf])
def release_buffer(self, pbuf):
fnty = Type.function(Type.void(), [ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_release_buffer")
return self.builder.call(fn, [pbuf])
def extract_np_datetime(self, obj):
fnty = Type.function(Type.int(64), [self.pyobj])
fn = self._get_function(fnty, name="numba_extract_np_datetime")
return self.builder.call(fn, [obj])
def extract_np_timedelta(self, obj):
fnty = Type.function(Type.int(64), [self.pyobj])
fn = self._get_function(fnty, name="numba_extract_np_timedelta")
return self.builder.call(fn, [obj])
def create_np_datetime(self, val, unit_code):
unit_code = Constant.int(Type.int(), unit_code)
fnty = Type.function(self.pyobj, [Type.int(64), Type.int()])
fn = self._get_function(fnty, name="numba_create_np_datetime")
return self.builder.call(fn, [val, unit_code])
def create_np_timedelta(self, val, unit_code):
unit_code = Constant.int(Type.int(), unit_code)
fnty = Type.function(self.pyobj, [Type.int(64), Type.int()])
fn = self._get_function(fnty, name="numba_create_np_timedelta")
return self.builder.call(fn, [val, unit_code])
def recreate_record(self, pdata, size, dtype, env_manager):
fnty = Type.function(self.pyobj, [Type.pointer(Type.int(8)),
Type.int(), self.pyobj])
fn = self._get_function(fnty, name="numba_recreate_record")
dtypeaddr = env_manager.read_const(env_manager.add_const(dtype))
return self.builder.call(fn, [pdata, size, dtypeaddr])
def string_from_constant_string(self, string):
cstr = self.context.insert_const_string(self.module, string)
sz = self.context.get_constant(types.intp, len(string))
return self.string_from_string_and_size(cstr, sz)
def call_jit_code(self, func, sig, args):
"""Calls into Numba jitted code and propagate error using the Python
calling convention.
Parameters
----------
func : function
The Python function to be compiled. This function is compiled
in nopython-mode.
sig : numba.typing.Signature
The function signature for *func*.
args : Sequence[llvmlite.binding.Value]
LLVM values to use as arguments.
Returns
-------
(is_error, res) : 2-tuple of llvmlite.binding.Value.
is_error : true iff *func* raised an exception.
res : Returned value from *func* iff *is_error* is false.
If *is_error* is true, this method will adapt the nopython exception
into a Python exception. Caller should return NULL to Python to
indicate an error.
"""
# Compile *func*
builder = self.builder
cres = self.context.compile_subroutine(builder, func, sig)
got_retty = cres.signature.return_type
retty = sig.return_type
if got_retty != retty:
# This error indicates an error in *func* or the caller of this
# method.
raise errors.LoweringError(
f'mismatching signature {got_retty} != {retty}.\n'
)
# Call into *func*
status, res = self.context.call_internal_no_propagate(
builder, cres.fndesc, sig, args,
)
# Post-call handling for *func*
is_error_ptr = cgutils.alloca_once(builder, cgutils.bool_t, zfill=True)
res_type = self.context.get_value_type(sig.return_type)
res_ptr = cgutils.alloca_once(builder, res_type, zfill=True)
# Handle error and adapt the nopython exception into cpython exception
with builder.if_else(status.is_error) as (has_err, no_err):
with has_err:
builder.store(status.is_error, is_error_ptr)
# Set error state in the Python interpreter
self.context.call_conv.raise_error(builder, self, status)
with no_err:
# Handle returned value
res = imputils.fix_returning_optional(
self.context, builder, sig, status, res,
)
builder.store(res, res_ptr)
is_error = builder.load(is_error_ptr)
res = builder.load(res_ptr)
return is_error, res
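# A minimal consumption sketch for call_jit_code (illustrative only; assumes
# `pyapi` is a PythonAPI instance and `func`, `sig`, `args` are in scope):
#
#     is_error, res = pyapi.call_jit_code(func, sig, args)
#     with pyapi.builder.if_then(is_error, likely=False):
#         pyapi.builder.ret(pyapi.get_null_object())   # propagate to CPython
#     # ... otherwise box `res` and return it to the interpreter ...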
class ObjModeUtils:
"""Internal utils for calling objmode dispatcher from within NPM code.
"""
def __init__(self, pyapi):
self.pyapi = pyapi
def load_dispatcher(self, fnty, argtypes):
builder = self.pyapi.builder
tyctx = self.pyapi.context
m = builder.module
# Add a global variable to cache the objmode dispatcher
gv = ir.GlobalVariable(
m, self.pyapi.pyobj,
name=m.get_unique_name("cached_objmode_dispatcher"),
)
gv.initializer = gv.type.pointee(None)
gv.linkage = 'internal'
cached = builder.load(gv)
with builder.if_then(cgutils.is_null(builder, cached)):
if serialize.is_serialiable(fnty.dispatcher):
cls = type(self)
compiler = self.pyapi.unserialize(
self.pyapi.serialize_object(cls._call_objmode_dispatcher)
)
serialized_dispatcher = self.pyapi.serialize_object(
(fnty.dispatcher, tuple(argtypes)),
)
compile_args = self.pyapi.unserialize(serialized_dispatcher)
callee = self.pyapi.call_function_objargs(
compiler, [compile_args],
)
# Clean up
self.pyapi.decref(compiler)
self.pyapi.decref(compile_args)
else:
entry_pt = fnty.dispatcher.compile(tuple(argtypes))
callee = tyctx.add_dynamic_addr(
builder, id(entry_pt), info="with_objectmode",
)
# Incref the dispatcher and cache it
self.pyapi.incref(callee)
builder.store(callee, gv)
callee = builder.load(gv)
return callee
@staticmethod
def _call_objmode_dispatcher(compile_args):
dispatcher, argtypes = compile_args
entrypt = dispatcher.compile(argtypes)
return entrypt
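    # Note: load_dispatcher() caches the resolved dispatcher in an internal
    # LLVM global, so the unpickle-and-compile path above runs at most once
    # per module; later calls just reload the cached PyObject*.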
|
def from_native_value(self, typ, val, env_manager=None):
"""
Box the native value of the given Numba type. A Python object
pointer is returned (NULL if an error occurred).
This method steals any native (NRT) reference embedded in *val*.
"""
from numba.core.boxing import box_unsupported
impl = _boxers.lookup(typ.__class__, box_unsupported)
c = _BoxContext(self.context, self.builder, self, env_manager)
return impl(typ, val, c)
| 1,391
| 1,402
|
from collections import namedtuple
import contextlib
import pickle
import hashlib
from llvmlite import ir
from llvmlite.llvmpy.core import Type, Constant
import llvmlite.llvmpy.core as lc
import ctypes
from numba import _helperlib
from numba.core import (
types, utils, config, lowering, cgutils, imputils, serialize,
)
PY_UNICODE_1BYTE_KIND = _helperlib.py_unicode_1byte_kind
PY_UNICODE_2BYTE_KIND = _helperlib.py_unicode_2byte_kind
PY_UNICODE_4BYTE_KIND = _helperlib.py_unicode_4byte_kind
PY_UNICODE_WCHAR_KIND = _helperlib.py_unicode_wchar_kind
class _Registry(object):
def __init__(self):
self.functions = {}
def register(self, typeclass):
assert issubclass(typeclass, types.Type)
def decorator(func):
if typeclass in self.functions:
raise KeyError("duplicate registration for %s" % (typeclass,))
self.functions[typeclass] = func
return func
return decorator
def lookup(self, typeclass, default=None):
assert issubclass(typeclass, types.Type)
for cls in typeclass.__mro__:
func = self.functions.get(cls)
if func is not None:
return func
return default
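# A small usage sketch of the registry pattern above (illustrative only;
# `types.Integer` stands in for any Numba type class):
#
#     _example = _Registry()
#
#     @_example.register(types.Integer)
#     def _handle_integer(typ, val, c):
#         ...
#
#     impl = _example.lookup(types.Integer)   # walks the MRO to find a match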
# Registries of boxing / unboxing implementations
_boxers = _Registry()
_unboxers = _Registry()
_reflectors = _Registry()
box = _boxers.register
unbox = _unboxers.register
reflect = _reflectors.register
class _BoxContext(namedtuple("_BoxContext",
("context", "builder", "pyapi", "env_manager"))):
"""
The facilities required by boxing implementations.
"""
__slots__ = ()
def box(self, typ, val):
return self.pyapi.from_native_value(typ, val, self.env_manager)
class _UnboxContext(namedtuple("_UnboxContext",
("context", "builder", "pyapi"))):
"""
The facilities required by unboxing implementations.
"""
__slots__ = ()
def unbox(self, typ, obj):
return self.pyapi.to_native_value(typ, obj)
class _ReflectContext(namedtuple("_ReflectContext",
("context", "builder", "pyapi", "env_manager",
"is_error"))):
"""
The facilities required by reflection implementations.
"""
__slots__ = ()
# XXX the error bit is currently unused by consumers (e.g. PyCallWrapper)
def set_error(self):
        # store(value, ptr): raise the error flag in the stack slot.
        self.builder.store(cgutils.true_bit, self.is_error)
def box(self, typ, val):
return self.pyapi.from_native_value(typ, val, self.env_manager)
def reflect(self, typ, val):
return self.pyapi.reflect_native_value(typ, val, self.env_manager)
class NativeValue(object):
"""
    Encapsulate the result of converting a Python object to a native value,
    recording whether the conversion was successful and how to clean up.
"""
def __init__(self, value, is_error=None, cleanup=None):
self.value = value
self.is_error = is_error if is_error is not None else cgutils.false_bit
self.cleanup = cleanup
class EnvironmentManager(object):
def __init__(self, pyapi, env, env_body, env_ptr):
assert isinstance(env, lowering.Environment)
self.pyapi = pyapi
self.env = env
self.env_body = env_body
self.env_ptr = env_ptr
def add_const(self, const):
"""
Add a constant to the environment, return its index.
"""
# All constants are frozen inside the environment
if isinstance(const, str):
const = utils.intern(const)
for index, val in enumerate(self.env.consts):
if val is const:
break
else:
index = len(self.env.consts)
self.env.consts.append(const)
return index
def read_const(self, index):
"""
Look up constant number *index* inside the environment body.
A borrowed reference is returned.
        The returned LLVM value may be NULL at runtime, which indicates
        an error (e.g. the Environment was released prematurely).
"""
assert index < len(self.env.consts)
builder = self.pyapi.builder
consts = self.env_body.consts
ret = cgutils.alloca_once(builder, self.pyapi.pyobj, zfill=True)
with builder.if_else(cgutils.is_not_null(builder, consts)) as \
(br_not_null, br_null):
with br_not_null:
getitem = self.pyapi.list_getitem(consts, index)
builder.store(getitem, ret)
with br_null:
# This can happen when the Environment is accidentally released
# and has subsequently been garbage collected.
self.pyapi.err_set_string(
"PyExc_RuntimeError",
"`env.consts` is NULL in `read_const`",
)
return builder.load(ret)
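    # A minimal sketch of how add_const()/read_const() pair up (illustrative
    # only; `env_manager` and `some_obj` are assumed to be in scope):
    #
    #     index = env_manager.add_const(some_obj)   # frozen at compile time
    #     llval = env_manager.read_const(index)     # borrowed ref at runtime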
_IteratorLoop = namedtuple('_IteratorLoop', ('value', 'do_break'))
class PythonAPI(object):
"""
Code generation facilities to call into the CPython C API (and related
helpers).
"""
def __init__(self, context, builder):
"""
        Note: may be called multiple times when lowering a function.
"""
from numba.core import boxing
self.context = context
self.builder = builder
self.module = builder.basic_block.function.module
# A unique mapping of serialized objects in this module
try:
self.module.__serialized
except AttributeError:
self.module.__serialized = {}
# Initialize types
self.pyobj = self.context.get_argument_type(types.pyobject)
self.pyobjptr = self.pyobj.as_pointer()
self.voidptr = Type.pointer(Type.int(8))
self.long = Type.int(ctypes.sizeof(ctypes.c_long) * 8)
self.ulong = self.long
self.longlong = Type.int(ctypes.sizeof(ctypes.c_ulonglong) * 8)
self.ulonglong = self.longlong
self.double = Type.double()
self.py_ssize_t = self.context.get_value_type(types.intp)
self.cstring = Type.pointer(Type.int(8))
self.gil_state = Type.int(_helperlib.py_gil_state_size * 8)
self.py_buffer_t = ir.ArrayType(ir.IntType(8), _helperlib.py_buffer_size)
self.py_hash_t = self.py_ssize_t
self.py_unicode_1byte_kind = _helperlib.py_unicode_1byte_kind
self.py_unicode_2byte_kind = _helperlib.py_unicode_2byte_kind
self.py_unicode_4byte_kind = _helperlib.py_unicode_4byte_kind
self.py_unicode_wchar_kind = _helperlib.py_unicode_wchar_kind
def get_env_manager(self, env, env_body, env_ptr):
return EnvironmentManager(self, env, env_body, env_ptr)
def emit_environment_sentry(self, envptr, return_pyobject=False,
debug_msg=''):
"""Emits LLVM code to ensure the `envptr` is not NULL
"""
is_null = cgutils.is_null(self.builder, envptr)
with cgutils.if_unlikely(self.builder, is_null):
if return_pyobject:
fnty = self.builder.function.type.pointee
assert fnty.return_type == self.pyobj
self.err_set_string(
"PyExc_RuntimeError", f"missing Environment: {debug_msg}",
)
self.builder.ret(self.get_null_object())
else:
self.context.call_conv.return_user_exc(
self.builder, RuntimeError,
(f"missing Environment: {debug_msg}",),
)
# ------ Python API -----
#
# Basic object API
#
def incref(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="Py_IncRef")
self.builder.call(fn, [obj])
def decref(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="Py_DecRef")
self.builder.call(fn, [obj])
def get_type(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="numba_py_type")
return self.builder.call(fn, [obj])
#
# Argument unpacking
#
def parse_tuple_and_keywords(self, args, kws, fmt, keywords, *objs):
charptr = Type.pointer(Type.int(8))
charptrary = Type.pointer(charptr)
argtypes = [self.pyobj, self.pyobj, charptr, charptrary]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_ParseTupleAndKeywords")
return self.builder.call(fn, [args, kws, fmt, keywords] + list(objs))
def parse_tuple(self, args, fmt, *objs):
charptr = Type.pointer(Type.int(8))
argtypes = [self.pyobj, charptr]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_ParseTuple")
return self.builder.call(fn, [args, fmt] + list(objs))
def unpack_tuple(self, args, name, n_min, n_max, *objs):
charptr = Type.pointer(Type.int(8))
argtypes = [self.pyobj, charptr, self.py_ssize_t, self.py_ssize_t]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_UnpackTuple")
n_min = Constant.int(self.py_ssize_t, n_min)
n_max = Constant.int(self.py_ssize_t, n_max)
if isinstance(name, str):
name = self.context.insert_const_string(self.builder.module, name)
return self.builder.call(fn, [args, name, n_min, n_max] + list(objs))
#
# Exception and errors
#
def err_occurred(self):
fnty = Type.function(self.pyobj, ())
fn = self._get_function(fnty, name="PyErr_Occurred")
return self.builder.call(fn, ())
def err_clear(self):
fnty = Type.function(Type.void(), ())
fn = self._get_function(fnty, name="PyErr_Clear")
return self.builder.call(fn, ())
def err_set_string(self, exctype, msg):
fnty = Type.function(Type.void(), [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyErr_SetString")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
if isinstance(msg, str):
msg = self.context.insert_const_string(self.module, msg)
return self.builder.call(fn, (exctype, msg))
def err_format(self, exctype, msg, *format_args):
fnty = Type.function(Type.void(), [self.pyobj, self.cstring], var_arg=True)
fn = self._get_function(fnty, name="PyErr_Format")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
if isinstance(msg, str):
msg = self.context.insert_const_string(self.module, msg)
return self.builder.call(fn, (exctype, msg) + tuple(format_args))
def raise_object(self, exc=None):
"""
        Raise an arbitrary exception (a type, a value, a (type, args) pair,
        or None to re-raise). A reference to the argument is consumed.
"""
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="numba_do_raise")
if exc is None:
exc = self.make_none()
return self.builder.call(fn, (exc,))
def err_set_object(self, exctype, excval):
fnty = Type.function(Type.void(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyErr_SetObject")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
return self.builder.call(fn, (exctype, excval))
def err_set_none(self, exctype):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="PyErr_SetNone")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
return self.builder.call(fn, (exctype,))
def err_write_unraisable(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="PyErr_WriteUnraisable")
return self.builder.call(fn, (obj,))
def err_fetch(self, pty, pval, ptb):
fnty = Type.function(Type.void(), [self.pyobjptr] * 3)
fn = self._get_function(fnty, name="PyErr_Fetch")
return self.builder.call(fn, (pty, pval, ptb))
def err_restore(self, ty, val, tb):
fnty = Type.function(Type.void(), [self.pyobj] * 3)
fn = self._get_function(fnty, name="PyErr_Restore")
return self.builder.call(fn, (ty, val, tb))
@contextlib.contextmanager
def err_push(self, keep_new=False):
"""
Temporarily push the current error indicator while the code
block is executed. If *keep_new* is True and the code block
raises a new error, the new error is kept, otherwise the old
error indicator is restored at the end of the block.
"""
pty, pval, ptb = [cgutils.alloca_once(self.builder, self.pyobj)
for i in range(3)]
self.err_fetch(pty, pval, ptb)
yield
ty = self.builder.load(pty)
val = self.builder.load(pval)
tb = self.builder.load(ptb)
if keep_new:
new_error = cgutils.is_not_null(self.builder, self.err_occurred())
with self.builder.if_else(new_error, likely=False) as (if_error, if_ok):
with if_error:
# Code block raised an error, keep it
self.decref(ty)
self.decref(val)
self.decref(tb)
with if_ok:
# Restore previous error
self.err_restore(ty, val, tb)
else:
self.err_restore(ty, val, tb)
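    # Usage sketch (illustrative only): run cleanup code without clobbering
    # a pending exception.
    #
    #     with pyapi.err_push():
    #         pyapi.decref(tmp)   # the saved error indicator is restored after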
def get_c_object(self, name):
"""
Get a Python object through its C-accessible *name*
(e.g. "PyExc_ValueError"). The underlying variable must be
a `PyObject *`, and the value of that pointer is returned.
"""
        # An LLVM global variable is implicitly a pointer to the declared
        # type, so fix it up by using pyobj.pointee.
return self.context.get_c_value(self.builder, self.pyobj.pointee, name,
dllimport=True)
def raise_missing_global_error(self, name):
msg = "global name '%s' is not defined" % name
cstr = self.context.insert_const_string(self.module, msg)
self.err_set_string("PyExc_NameError", cstr)
def raise_missing_name_error(self, name):
msg = "name '%s' is not defined" % name
cstr = self.context.insert_const_string(self.module, msg)
self.err_set_string("PyExc_NameError", cstr)
def fatal_error(self, msg):
fnty = Type.function(Type.void(), [self.cstring])
fn = self._get_function(fnty, name="Py_FatalError")
fn.attributes.add("noreturn")
cstr = self.context.insert_const_string(self.module, msg)
self.builder.call(fn, (cstr,))
#
# Concrete dict API
#
def dict_getitem_string(self, dic, name):
"""Lookup name inside dict
Returns a borrowed reference
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyDict_GetItemString")
cstr = self.context.insert_const_string(self.module, name)
return self.builder.call(fn, [dic, cstr])
def dict_getitem(self, dic, name):
"""Lookup name inside dict
Returns a borrowed reference
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyDict_GetItem")
return self.builder.call(fn, [dic, name])
def dict_new(self, presize=0):
if presize == 0:
fnty = Type.function(self.pyobj, ())
fn = self._get_function(fnty, name="PyDict_New")
return self.builder.call(fn, ())
else:
fnty = Type.function(self.pyobj, [self.py_ssize_t])
fn = self._get_function(fnty, name="_PyDict_NewPresized")
return self.builder.call(fn,
[Constant.int(self.py_ssize_t, presize)])
def dict_setitem(self, dictobj, nameobj, valobj):
fnty = Type.function(Type.int(), (self.pyobj, self.pyobj,
self.pyobj))
fn = self._get_function(fnty, name="PyDict_SetItem")
return self.builder.call(fn, (dictobj, nameobj, valobj))
def dict_setitem_string(self, dictobj, name, valobj):
fnty = Type.function(Type.int(), (self.pyobj, self.cstring,
self.pyobj))
fn = self._get_function(fnty, name="PyDict_SetItemString")
cstr = self.context.insert_const_string(self.module, name)
return self.builder.call(fn, (dictobj, cstr, valobj))
def dict_pack(self, keyvalues):
"""
Args
-----
keyvalues: iterable of (str, llvm.Value of PyObject*)
"""
dictobj = self.dict_new()
with self.if_object_ok(dictobj):
for k, v in keyvalues:
self.dict_setitem_string(dictobj, k, v)
return dictobj
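    # Example (illustrative only; `v1` and `v2` are PyObject* LLVM values):
    #
    #     d = pyapi.dict_pack([("a", v1), ("b", v2)])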
#
# Concrete number APIs
#
def float_from_double(self, fval):
fnty = Type.function(self.pyobj, [self.double])
fn = self._get_function(fnty, name="PyFloat_FromDouble")
return self.builder.call(fn, [fval])
def number_as_ssize_t(self, numobj):
fnty = Type.function(self.py_ssize_t, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyNumber_AsSsize_t")
# We don't want any clipping, so pass OverflowError as the 2nd arg
exc_class = self.get_c_object("PyExc_OverflowError")
return self.builder.call(fn, [numobj, exc_class])
def number_long(self, numobj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Long")
return self.builder.call(fn, [numobj])
def long_as_ulonglong(self, numobj):
fnty = Type.function(self.ulonglong, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsUnsignedLongLong")
return self.builder.call(fn, [numobj])
def long_as_longlong(self, numobj):
fnty = Type.function(self.ulonglong, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsLongLong")
return self.builder.call(fn, [numobj])
def long_as_voidptr(self, numobj):
"""
Convert the given Python integer to a void*. This is recommended
over number_as_ssize_t as it isn't affected by signedness.
"""
fnty = Type.function(self.voidptr, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsVoidPtr")
return self.builder.call(fn, [numobj])
def _long_from_native_int(self, ival, func_name, native_int_type,
signed):
        fnty = Type.function(self.pyobj, [native_int_type])
        fn = self._get_function(fnty, name=func_name)
        resptr = cgutils.alloca_once(self.builder, self.pyobj)
        self.builder.store(self.builder.call(fn, [ival]), resptr)
        return self.builder.load(resptr)
def long_from_long(self, ival):
func_name = "PyLong_FromLong"
fnty = Type.function(self.pyobj, [self.long])
fn = self._get_function(fnty, name=func_name)
return self.builder.call(fn, [ival])
def long_from_ulong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromUnsignedLong",
self.long, signed=False)
def long_from_ssize_t(self, ival):
return self._long_from_native_int(ival, "PyLong_FromSsize_t",
self.py_ssize_t, signed=True)
def long_from_longlong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromLongLong",
self.longlong, signed=True)
def long_from_ulonglong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromUnsignedLongLong",
self.ulonglong, signed=False)
def long_from_signed_int(self, ival):
"""
Return a Python integer from any native integer value.
"""
bits = ival.type.width
if bits <= self.long.width:
return self.long_from_long(self.builder.sext(ival, self.long))
elif bits <= self.longlong.width:
return self.long_from_longlong(self.builder.sext(ival, self.longlong))
else:
raise OverflowError("integer too big (%d bits)" % (bits))
def long_from_unsigned_int(self, ival):
"""
Same as long_from_signed_int, but for unsigned values.
"""
bits = ival.type.width
if bits <= self.ulong.width:
return self.long_from_ulong(self.builder.zext(ival, self.ulong))
elif bits <= self.ulonglong.width:
return self.long_from_ulonglong(self.builder.zext(ival, self.ulonglong))
else:
raise OverflowError("integer too big (%d bits)" % (bits))
def _get_number_operator(self, name):
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyNumber_%s" % name)
return fn
def _call_number_operator(self, name, lhs, rhs, inplace=False):
if inplace:
name = "InPlace" + name
fn = self._get_number_operator(name)
return self.builder.call(fn, [lhs, rhs])
def number_add(self, lhs, rhs, inplace=False):
return self._call_number_operator("Add", lhs, rhs, inplace=inplace)
def number_subtract(self, lhs, rhs, inplace=False):
return self._call_number_operator("Subtract", lhs, rhs, inplace=inplace)
def number_multiply(self, lhs, rhs, inplace=False):
return self._call_number_operator("Multiply", lhs, rhs, inplace=inplace)
def number_truedivide(self, lhs, rhs, inplace=False):
return self._call_number_operator("TrueDivide", lhs, rhs, inplace=inplace)
def number_floordivide(self, lhs, rhs, inplace=False):
return self._call_number_operator("FloorDivide", lhs, rhs, inplace=inplace)
def number_remainder(self, lhs, rhs, inplace=False):
return self._call_number_operator("Remainder", lhs, rhs, inplace=inplace)
def number_matrix_multiply(self, lhs, rhs, inplace=False):
return self._call_number_operator("MatrixMultiply", lhs, rhs, inplace=inplace)
def number_lshift(self, lhs, rhs, inplace=False):
return self._call_number_operator("Lshift", lhs, rhs, inplace=inplace)
def number_rshift(self, lhs, rhs, inplace=False):
return self._call_number_operator("Rshift", lhs, rhs, inplace=inplace)
def number_and(self, lhs, rhs, inplace=False):
return self._call_number_operator("And", lhs, rhs, inplace=inplace)
def number_or(self, lhs, rhs, inplace=False):
return self._call_number_operator("Or", lhs, rhs, inplace=inplace)
def number_xor(self, lhs, rhs, inplace=False):
return self._call_number_operator("Xor", lhs, rhs, inplace=inplace)
def number_power(self, lhs, rhs, inplace=False):
fnty = Type.function(self.pyobj, [self.pyobj] * 3)
fname = "PyNumber_InPlacePower" if inplace else "PyNumber_Power"
fn = self._get_function(fnty, fname)
return self.builder.call(fn, [lhs, rhs, self.borrow_none()])
def number_negative(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Negative")
return self.builder.call(fn, (obj,))
def number_positive(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Positive")
return self.builder.call(fn, (obj,))
def number_float(self, val):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Float")
return self.builder.call(fn, [val])
def number_invert(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Invert")
return self.builder.call(fn, (obj,))
def float_as_double(self, fobj):
fnty = Type.function(self.double, [self.pyobj])
fn = self._get_function(fnty, name="PyFloat_AsDouble")
return self.builder.call(fn, [fobj])
def bool_from_bool(self, bval):
"""
        Get a Python bool from an LLVM boolean.
"""
longval = self.builder.zext(bval, self.long)
return self.bool_from_long(longval)
def bool_from_long(self, ival):
fnty = Type.function(self.pyobj, [self.long])
fn = self._get_function(fnty, name="PyBool_FromLong")
return self.builder.call(fn, [ival])
def complex_from_doubles(self, realval, imagval):
fnty = Type.function(self.pyobj, [Type.double(), Type.double()])
fn = self._get_function(fnty, name="PyComplex_FromDoubles")
return self.builder.call(fn, [realval, imagval])
def complex_real_as_double(self, cobj):
fnty = Type.function(Type.double(), [self.pyobj])
fn = self._get_function(fnty, name="PyComplex_RealAsDouble")
return self.builder.call(fn, [cobj])
def complex_imag_as_double(self, cobj):
fnty = Type.function(Type.double(), [self.pyobj])
fn = self._get_function(fnty, name="PyComplex_ImagAsDouble")
return self.builder.call(fn, [cobj])
#
# Concrete slice API
#
def slice_as_ints(self, obj):
"""
Read the members of a slice of integers.
        Returns an (ok, start, stop, step) tuple where ok is a boolean and
        the following members are pointer-sized ints.
"""
pstart = cgutils.alloca_once(self.builder, self.py_ssize_t)
pstop = cgutils.alloca_once(self.builder, self.py_ssize_t)
pstep = cgutils.alloca_once(self.builder, self.py_ssize_t)
fnty = Type.function(Type.int(),
[self.pyobj] + [self.py_ssize_t.as_pointer()] * 3)
fn = self._get_function(fnty, name="numba_unpack_slice")
res = self.builder.call(fn, (obj, pstart, pstop, pstep))
start = self.builder.load(pstart)
stop = self.builder.load(pstop)
step = self.builder.load(pstep)
return cgutils.is_null(self.builder, res), start, stop, step
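    # Usage sketch (illustrative only; `sliceobj` is a PyObject* LLVM value):
    #
    #     ok, start, stop, step = pyapi.slice_as_ints(sliceobj)
    #     with cgutils.if_unlikely(pyapi.builder, pyapi.builder.not_(ok)):
    #         ...   # unpacking failed, handle the error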
#
# List and sequence APIs
#
def sequence_getslice(self, obj, start, stop):
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t,
self.py_ssize_t])
fn = self._get_function(fnty, name="PySequence_GetSlice")
return self.builder.call(fn, (obj, start, stop))
def sequence_tuple(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PySequence_Tuple")
return self.builder.call(fn, [obj])
def list_new(self, szval):
fnty = Type.function(self.pyobj, [self.py_ssize_t])
fn = self._get_function(fnty, name="PyList_New")
return self.builder.call(fn, [szval])
def list_size(self, lst):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PyList_Size")
return self.builder.call(fn, [lst])
def list_append(self, lst, val):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyList_Append")
return self.builder.call(fn, [lst, val])
def list_setitem(self, lst, idx, val):
"""
Warning: Steals reference to ``val``
"""
fnty = Type.function(Type.int(), [self.pyobj, self.py_ssize_t,
self.pyobj])
fn = self._get_function(fnty, name="PyList_SetItem")
return self.builder.call(fn, [lst, idx, val])
def list_getitem(self, lst, idx):
"""
Returns a borrowed reference.
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t])
fn = self._get_function(fnty, name="PyList_GetItem")
if isinstance(idx, int):
idx = self.context.get_constant(types.intp, idx)
return self.builder.call(fn, [lst, idx])
def list_setslice(self, lst, start, stop, obj):
if obj is None:
obj = self.get_null_object()
fnty = Type.function(Type.int(), [self.pyobj, self.py_ssize_t,
self.py_ssize_t, self.pyobj])
fn = self._get_function(fnty, name="PyList_SetSlice")
return self.builder.call(fn, (lst, start, stop, obj))
#
# Concrete tuple API
#
def tuple_getitem(self, tup, idx):
"""
        Returns a borrowed reference.
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t])
fn = self._get_function(fnty, name="PyTuple_GetItem")
idx = self.context.get_constant(types.intp, idx)
return self.builder.call(fn, [tup, idx])
def tuple_pack(self, items):
fnty = Type.function(self.pyobj, [self.py_ssize_t], var_arg=True)
fn = self._get_function(fnty, name="PyTuple_Pack")
n = self.context.get_constant(types.intp, len(items))
args = [n]
args.extend(items)
return self.builder.call(fn, args)
def tuple_size(self, tup):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PyTuple_Size")
return self.builder.call(fn, [tup])
def tuple_new(self, count):
fnty = Type.function(self.pyobj, [Type.int()])
fn = self._get_function(fnty, name='PyTuple_New')
return self.builder.call(fn, [self.context.get_constant(types.int32,
count)])
def tuple_setitem(self, tuple_val, index, item):
"""
Steals a reference to `item`.
"""
fnty = Type.function(Type.int(), [self.pyobj, Type.int(), self.pyobj])
setitem_fn = self._get_function(fnty, name='PyTuple_SetItem')
index = self.context.get_constant(types.int32, index)
self.builder.call(setitem_fn, [tuple_val, index, item])
#
# Concrete set API
#
def set_new(self, iterable=None):
if iterable is None:
iterable = self.get_null_object()
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PySet_New")
return self.builder.call(fn, [iterable])
def set_add(self, set, value):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PySet_Add")
return self.builder.call(fn, [set, value])
def set_clear(self, set):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PySet_Clear")
return self.builder.call(fn, [set])
def set_size(self, set):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PySet_Size")
return self.builder.call(fn, [set])
def set_update(self, set, iterable):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="_PySet_Update")
return self.builder.call(fn, [set, iterable])
def set_next_entry(self, set, posptr, keyptr, hashptr):
fnty = Type.function(Type.int(),
[self.pyobj, self.py_ssize_t.as_pointer(),
self.pyobj.as_pointer(), self.py_hash_t.as_pointer()])
fn = self._get_function(fnty, name="_PySet_NextEntry")
return self.builder.call(fn, (set, posptr, keyptr, hashptr))
@contextlib.contextmanager
def set_iterate(self, set):
builder = self.builder
hashptr = cgutils.alloca_once(builder, self.py_hash_t, name="hashptr")
keyptr = cgutils.alloca_once(builder, self.pyobj, name="keyptr")
posptr = cgutils.alloca_once_value(builder,
ir.Constant(self.py_ssize_t, 0),
name="posptr")
bb_body = builder.append_basic_block("bb_body")
bb_end = builder.append_basic_block("bb_end")
builder.branch(bb_body)
def do_break():
builder.branch(bb_end)
with builder.goto_block(bb_body):
r = self.set_next_entry(set, posptr, keyptr, hashptr)
finished = cgutils.is_null(builder, r)
with builder.if_then(finished, likely=False):
builder.branch(bb_end)
yield _IteratorLoop(builder.load(keyptr), do_break)
builder.branch(bb_body)
builder.position_at_end(bb_end)
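    # Usage sketch (illustrative only; `setobj` is a PyObject* LLVM value):
    #
    #     with pyapi.set_iterate(setobj) as loop:
    #         key = loop.value        # borrowed reference to the entry
    #         ...                     # call loop.do_break() to exit early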
#
# GIL APIs
#
def gil_ensure(self):
"""
Ensure the GIL is acquired.
The returned value must be consumed by gil_release().
"""
gilptrty = Type.pointer(self.gil_state)
fnty = Type.function(Type.void(), [gilptrty])
fn = self._get_function(fnty, "numba_gil_ensure")
gilptr = cgutils.alloca_once(self.builder, self.gil_state)
self.builder.call(fn, [gilptr])
return gilptr
def gil_release(self, gil):
"""
        Release the GIL acquired by gil_ensure().
        Must be paired with a preceding gil_ensure() call.
"""
gilptrty = Type.pointer(self.gil_state)
fnty = Type.function(Type.void(), [gilptrty])
fn = self._get_function(fnty, "numba_gil_release")
return self.builder.call(fn, [gil])
def save_thread(self):
"""
Release the GIL and return the former thread state
(an opaque non-NULL pointer).
"""
fnty = Type.function(self.voidptr, [])
fn = self._get_function(fnty, name="PyEval_SaveThread")
return self.builder.call(fn, [])
def restore_thread(self, thread_state):
"""
Restore the given thread state by reacquiring the GIL.
"""
fnty = Type.function(Type.void(), [self.voidptr])
fn = self._get_function(fnty, name="PyEval_RestoreThread")
self.builder.call(fn, [thread_state])
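    # Typical pairing (illustrative only): release the GIL around a blocking
    # native call, then reacquire it before touching any Python objects.
    #
    #     tstate = pyapi.save_thread()
    #     ...                              # GIL-free region
    #     pyapi.restore_thread(tstate)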
#
    # Generic object private data (a way of associating an arbitrary void*
    # pointer with an arbitrary Python object).
#
def object_get_private_data(self, obj):
fnty = Type.function(self.voidptr, [self.pyobj])
fn = self._get_function(fnty, name="numba_get_pyobject_private_data")
return self.builder.call(fn, (obj,))
def object_set_private_data(self, obj, ptr):
fnty = Type.function(Type.void(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="numba_set_pyobject_private_data")
return self.builder.call(fn, (obj, ptr))
def object_reset_private_data(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="numba_reset_pyobject_private_data")
return self.builder.call(fn, (obj,))
#
# Other APIs (organize them better!)
#
def import_module_noblock(self, modname):
fnty = Type.function(self.pyobj, [self.cstring])
fn = self._get_function(fnty, name="PyImport_ImportModuleNoBlock")
return self.builder.call(fn, [modname])
def call_function_objargs(self, callee, objargs):
fnty = Type.function(self.pyobj, [self.pyobj], var_arg=True)
fn = self._get_function(fnty, name="PyObject_CallFunctionObjArgs")
args = [callee] + list(objargs)
args.append(self.context.get_constant_null(types.pyobject))
return self.builder.call(fn, args)
def call_method(self, callee, method, objargs=()):
cname = self.context.insert_const_string(self.module, method)
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring, self.cstring],
var_arg=True)
fn = self._get_function(fnty, name="PyObject_CallMethod")
fmt = 'O' * len(objargs)
cfmt = self.context.insert_const_string(self.module, fmt)
args = [callee, cname, cfmt]
if objargs:
args.extend(objargs)
args.append(self.context.get_constant_null(types.pyobject))
return self.builder.call(fn, args)
def call(self, callee, args=None, kws=None):
if args is None:
args = self.get_null_object()
if kws is None:
kws = self.get_null_object()
fnty = Type.function(self.pyobj, [self.pyobj] * 3)
fn = self._get_function(fnty, name="PyObject_Call")
return self.builder.call(fn, (callee, args, kws))
def object_istrue(self, obj):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PyObject_IsTrue")
return self.builder.call(fn, [obj])
def object_not(self, obj):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PyObject_Not")
return self.builder.call(fn, [obj])
def object_richcompare(self, lhs, rhs, opstr):
"""
        Refer to the CPython source (Include/object.h) for the macro
        definitions of the opid values.
"""
ops = ['<', '<=', '==', '!=', '>', '>=']
if opstr in ops:
opid = ops.index(opstr)
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj, Type.int()])
fn = self._get_function(fnty, name="PyObject_RichCompare")
lopid = self.context.get_constant(types.int32, opid)
return self.builder.call(fn, (lhs, rhs, lopid))
elif opstr == 'is':
bitflag = self.builder.icmp(lc.ICMP_EQ, lhs, rhs)
return self.bool_from_bool(bitflag)
elif opstr == 'is not':
bitflag = self.builder.icmp(lc.ICMP_NE, lhs, rhs)
return self.bool_from_bool(bitflag)
elif opstr in ('in', 'not in'):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PySequence_Contains")
status = self.builder.call(fn, (rhs, lhs))
negone = self.context.get_constant(types.int32, -1)
is_good = self.builder.icmp(lc.ICMP_NE, status, negone)
# Stack allocate output and initialize to Null
outptr = cgutils.alloca_once_value(self.builder,
Constant.null(self.pyobj))
# If PySequence_Contains returns non-error value
with cgutils.if_likely(self.builder, is_good):
if opstr == 'not in':
status = self.builder.not_(status)
# Store the status as a boolean object
truncated = self.builder.trunc(status, Type.int(1))
self.builder.store(self.bool_from_bool(truncated),
outptr)
return self.builder.load(outptr)
else:
raise NotImplementedError("Unknown operator {op!r}".format(
op=opstr))
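    # For reference, the opid passed to PyObject_RichCompare above follows
    # CPython's Include/object.h: Py_LT=0, Py_LE=1, Py_EQ=2, Py_NE=3,
    # Py_GT=4, Py_GE=5 -- exactly the index order of the `ops` list.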
def iter_next(self, iterobj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyIter_Next")
return self.builder.call(fn, [iterobj])
def object_getiter(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetIter")
return self.builder.call(fn, [obj])
def object_getattr_string(self, obj, attr):
cstr = self.context.insert_const_string(self.module, attr)
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyObject_GetAttrString")
return self.builder.call(fn, [obj, cstr])
def object_getattr(self, obj, attr):
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetAttr")
return self.builder.call(fn, [obj, attr])
def object_setattr_string(self, obj, attr, val):
cstr = self.context.insert_const_string(self.module, attr)
fnty = Type.function(Type.int(), [self.pyobj, self.cstring, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetAttrString")
return self.builder.call(fn, [obj, cstr, val])
def object_setattr(self, obj, attr, val):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetAttr")
return self.builder.call(fn, [obj, attr, val])
def object_delattr_string(self, obj, attr):
# PyObject_DelAttrString() is actually a C macro calling
# PyObject_SetAttrString() with value == NULL.
return self.object_setattr_string(obj, attr, self.get_null_object())
def object_delattr(self, obj, attr):
# PyObject_DelAttr() is actually a C macro calling
# PyObject_SetAttr() with value == NULL.
return self.object_setattr(obj, attr, self.get_null_object())
def object_getitem(self, obj, key):
"""
Return obj[key]
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetItem")
return self.builder.call(fn, (obj, key))
def object_setitem(self, obj, key, val):
"""
obj[key] = val
"""
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetItem")
return self.builder.call(fn, (obj, key, val))
def object_delitem(self, obj, key):
"""
del obj[key]
"""
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_DelItem")
return self.builder.call(fn, (obj, key))
def string_as_string(self, strobj):
fnty = Type.function(self.cstring, [self.pyobj])
fname = "PyUnicode_AsUTF8"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [strobj])
def string_as_string_and_size(self, strobj):
"""
        Returns a tuple of ``(ok, buffer, length)``.
        ``ok`` is an i1 value that is set on success.
        ``buffer`` is an i8* pointing to the output buffer.
        ``length`` is an i32/i64 (py_ssize_t) giving the buffer length.
"""
p_length = cgutils.alloca_once(self.builder, self.py_ssize_t)
fnty = Type.function(self.cstring, [self.pyobj,
self.py_ssize_t.as_pointer()])
fname = "PyUnicode_AsUTF8AndSize"
fn = self._get_function(fnty, name=fname)
buffer = self.builder.call(fn, [strobj, p_length])
ok = self.builder.icmp_unsigned('!=',
ir.Constant(buffer.type, None),
buffer)
return (ok, buffer, self.builder.load(p_length))
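    # Usage sketch (illustrative only; `strobj` is a PyObject* LLVM value):
    #
    #     ok, buf, length = pyapi.string_as_string_and_size(strobj)
    #     with cgutils.if_likely(pyapi.builder, ok):
    #         ...   # `buf` points at UTF-8 data of `length` bytes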
def string_as_string_size_and_kind(self, strobj):
"""
        Returns a tuple of ``(ok, buffer, length, kind, is_ascii, hash)``.
        ``ok`` is an i1 value that is set on success.
        ``buffer`` is an i8* pointing to the output buffer.
        ``length`` is an i32/i64 (py_ssize_t) giving the buffer length.
        ``kind`` is an i32 (int32) holding the Unicode kind constant.
        ``is_ascii`` is an i32 (int32) flagging whether the string is ASCII.
        ``hash`` is a py_hash_t holding the string's precomputed hash.
"""
p_length = cgutils.alloca_once(self.builder, self.py_ssize_t)
p_kind = cgutils.alloca_once(self.builder, Type.int())
p_ascii = cgutils.alloca_once(self.builder, Type.int())
p_hash = cgutils.alloca_once(self.builder, self.py_hash_t)
fnty = Type.function(self.cstring, [self.pyobj,
self.py_ssize_t.as_pointer(),
Type.int().as_pointer(),
Type.int().as_pointer(),
self.py_hash_t.as_pointer()])
fname = "numba_extract_unicode"
fn = self._get_function(fnty, name=fname)
buffer = self.builder.call(
fn, [strobj, p_length, p_kind, p_ascii, p_hash])
ok = self.builder.icmp_unsigned('!=',
ir.Constant(buffer.type, None),
buffer)
return (ok, buffer, self.builder.load(p_length),
self.builder.load(p_kind), self.builder.load(p_ascii),
self.builder.load(p_hash))
def string_from_string_and_size(self, string, size):
fnty = Type.function(self.pyobj, [self.cstring, self.py_ssize_t])
fname = "PyString_FromStringAndSize"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string, size])
def string_from_string(self, string):
fnty = Type.function(self.pyobj, [self.cstring])
fname = "PyUnicode_FromString"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string])
def string_from_kind_and_data(self, kind, string, size):
fnty = Type.function(self.pyobj, [Type.int(), self.cstring, self.py_ssize_t])
fname = "PyUnicode_FromKindAndData"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [kind, string, size])
def bytes_from_string_and_size(self, string, size):
fnty = Type.function(self.pyobj, [self.cstring, self.py_ssize_t])
fname = "PyBytes_FromStringAndSize"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string, size])
def object_hash(self, obj):
fnty = Type.function(self.py_hash_t, [self.pyobj,])
fname = "PyObject_Hash"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [obj,])
def object_str(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyObject_Str")
return self.builder.call(fn, [obj])
def make_none(self):
obj = self.borrow_none()
self.incref(obj)
return obj
def borrow_none(self):
return self.get_c_object("_Py_NoneStruct")
def sys_write_stdout(self, fmt, *args):
fnty = Type.function(Type.void(), [self.cstring], var_arg=True)
fn = self._get_function(fnty, name="PySys_FormatStdout")
return self.builder.call(fn, (fmt,) + args)
def object_dump(self, obj):
"""
Dump a Python object on C stderr. For debugging purposes.
"""
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="_PyObject_Dump")
return self.builder.call(fn, (obj,))
#
# NRT (Numba runtime) APIs
#
def nrt_adapt_ndarray_to_python(self, aryty, ary, dtypeptr):
assert self.context.enable_nrt, "NRT required"
intty = ir.IntType(32)
fnty = Type.function(self.pyobj,
[self.voidptr, intty, intty, self.pyobj])
fn = self._get_function(fnty, name="NRT_adapt_ndarray_to_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
ndim = self.context.get_constant(types.int32, aryty.ndim)
writable = self.context.get_constant(types.int32, int(aryty.mutable))
aryptr = cgutils.alloca_once_value(self.builder, ary)
return self.builder.call(fn, [self.builder.bitcast(aryptr,
self.voidptr),
ndim, writable, dtypeptr])
def nrt_meminfo_new_from_pyobject(self, data, pyobj):
"""
        Allocate a new MemInfo with its data payload borrowed from a Python
        object.
"""
mod = self.builder.module
fnty = ir.FunctionType(
cgutils.voidptr_t,
[cgutils.voidptr_t, cgutils.voidptr_t],
)
fn = mod.get_or_insert_function(
fnty,
name="NRT_meminfo_new_from_pyobject",
)
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [data, pyobj])
def nrt_meminfo_as_pyobject(self, miptr):
mod = self.builder.module
fnty = ir.FunctionType(
self.pyobj,
[cgutils.voidptr_t]
)
fn = mod.get_or_insert_function(
fnty,
name='NRT_meminfo_as_pyobject',
)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [miptr])
def nrt_meminfo_from_pyobject(self, miobj):
mod = self.builder.module
fnty = ir.FunctionType(
cgutils.voidptr_t,
[self.pyobj]
)
fn = mod.get_or_insert_function(
fnty,
name='NRT_meminfo_from_pyobject',
)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [miobj])
def nrt_adapt_ndarray_from_python(self, ary, ptr):
assert self.context.enable_nrt
fnty = Type.function(Type.int(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="NRT_adapt_ndarray_from_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (ary, ptr))
def nrt_adapt_buffer_from_python(self, buf, ptr):
assert self.context.enable_nrt
fnty = Type.function(Type.void(), [Type.pointer(self.py_buffer_t),
self.voidptr])
fn = self._get_function(fnty, name="NRT_adapt_buffer_from_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (buf, ptr))
# ------ utils -----
def _get_function(self, fnty, name):
return self.module.get_or_insert_function(fnty, name=name)
def alloca_obj(self):
return self.builder.alloca(self.pyobj)
def alloca_buffer(self):
"""
Return a pointer to a stack-allocated, zero-initialized Py_buffer.
"""
# Treat the buffer as an opaque array of bytes
ptr = cgutils.alloca_once_value(self.builder,
lc.Constant.null(self.py_buffer_t))
return ptr
@contextlib.contextmanager
def if_object_ok(self, obj):
with cgutils.if_likely(self.builder,
cgutils.is_not_null(self.builder, obj)):
yield
def print_object(self, obj):
strobj = self.object_str(obj)
cstr = self.string_as_string(strobj)
fmt = self.context.insert_const_string(self.module, "%s")
self.sys_write_stdout(fmt, cstr)
self.decref(strobj)
def print_string(self, text):
fmt = self.context.insert_const_string(self.module, text)
self.sys_write_stdout(fmt)
def get_null_object(self):
return Constant.null(self.pyobj)
def return_none(self):
none = self.make_none()
self.builder.ret(none)
def list_pack(self, items):
n = len(items)
seq = self.list_new(self.context.get_constant(types.intp, n))
with self.if_object_ok(seq):
for i in range(n):
idx = self.context.get_constant(types.intp, i)
self.incref(items[i])
self.list_setitem(seq, idx, items[i])
return seq
def unserialize(self, structptr):
"""
        Unserialize some data. *structptr* should be a pointer to
        a {i8* data, i32 length, i8* hashbuf} structure, as produced
        by serialize_object().
"""
fnty = Type.function(self.pyobj,
(self.voidptr, ir.IntType(32), self.voidptr))
fn = self._get_function(fnty, name="numba_unpickle")
ptr = self.builder.extract_value(self.builder.load(structptr), 0)
n = self.builder.extract_value(self.builder.load(structptr), 1)
hashed = self.builder.extract_value(self.builder.load(structptr), 2)
return self.builder.call(fn, (ptr, n, hashed))
def serialize_uncached(self, obj):
"""
Same as serialize_object(), but don't create a global variable,
simply return a literal {i8* data, i32 length, i8* hashbuf} structure.
"""
# First make the array constant
data = serialize.dumps(obj)
assert len(data) < 2**31
name = ".const.pickledata.%s" % (id(obj) if config.DIFF_IR == 0 else "DIFF_IR")
bdata = cgutils.make_bytearray(data)
# Make SHA1 hash on the pickled content
# NOTE: update buffer size in numba_unpickle() when changing the
# hash algorithm.
hashed = cgutils.make_bytearray(hashlib.sha1(data).digest())
arr = self.context.insert_unique_const(self.module, name, bdata)
hasharr = self.context.insert_unique_const(
self.module, f"{name}.sha1", hashed,
)
# Then populate the structure constant
struct = ir.Constant.literal_struct([
arr.bitcast(self.voidptr),
ir.Constant(ir.IntType(32), arr.type.pointee.count),
hasharr.bitcast(self.voidptr),
])
return struct
def serialize_object(self, obj):
"""
        Serialize the given object in the bitcode, and return it
        as a pointer to a {i8* data, i32 length, i8* hashbuf} structure
        constant (suitable for passing to unserialize()).
"""
try:
gv = self.module.__serialized[obj]
except KeyError:
struct = self.serialize_uncached(obj)
name = ".const.picklebuf.%s" % (id(obj) if config.DIFF_IR == 0 else "DIFF_IR")
gv = self.context.insert_unique_const(self.module, name, struct)
# Make the id() (and hence the name) unique while populating the module.
self.module.__serialized[obj] = gv
return gv
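    # A minimal round-trip sketch (names are illustrative, not part of this
    # API): serialize_object() embeds a pickled constant in the module at
    # compile time, and unserialize() recovers the object at run time via
    # numba_unpickle:
    #
    #     gv = pyapi.serialize_object(some_obj)   # module-level constant
    #     obj = pyapi.unserialize(gv)             # PyObject* at run time
    #
    # serialize_object() caches per module, so repeated calls with the same
    # object reuse one {i8* data, i32 length, i8* hashbuf} constant.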
def c_api_error(self):
return cgutils.is_not_null(self.builder, self.err_occurred())
def to_native_value(self, typ, obj):
"""
Unbox the Python object as the given Numba type.
A NativeValue instance is returned.
"""
from numba.core.boxing import unbox_unsupported
impl = _unboxers.lookup(typ.__class__, unbox_unsupported)
c = _UnboxContext(self.context, self.builder, self)
return impl(typ, obj, c)
def from_native_return(self, typ, val, env_manager):
assert not isinstance(typ, types.Optional), "callconv should have " \
"prevented the return of " \
"optional value"
out = self.from_native_value(typ, val, env_manager)
return out
def from_native_value(self, typ, val, env_manager=None):
"""
Box the native value of the given Numba type. A Python object
pointer is returned (NULL if an error occurred).
This method steals any native (NRT) reference embedded in *val*.
"""
from numba.core.boxing import box_unsupported
impl = _boxers.lookup(typ.__class__, box_unsupported)
c = _BoxContext(self.context, self.builder, self, env_manager)
return impl(typ, val, c)
def reflect_native_value(self, typ, val, env_manager=None):
"""
Reflect the native value onto its Python original, if any.
An error bit (as an LLVM value) is returned.
"""
impl = _reflectors.lookup(typ.__class__)
if impl is None:
# Reflection isn't needed for most types
return cgutils.false_bit
is_error = cgutils.alloca_once_value(self.builder, cgutils.false_bit)
c = _ReflectContext(self.context, self.builder, self, env_manager,
is_error)
impl(typ, val, c)
return self.builder.load(c.is_error)
def to_native_generator(self, obj, typ):
"""
Extract the generator structure pointer from a generator *obj*
(a _dynfunc.Generator instance).
"""
gen_ptr_ty = Type.pointer(self.context.get_data_type(typ))
value = self.context.get_generator_state(self.builder, obj, gen_ptr_ty)
return NativeValue(value)
def from_native_generator(self, val, typ, env=None):
"""
Make a Numba generator (a _dynfunc.Generator instance) from a
generator structure pointer *val*.
*env* is an optional _dynfunc.Environment instance to be wrapped
in the generator.
"""
llty = self.context.get_data_type(typ)
assert not llty.is_pointer
gen_struct_size = self.context.get_abi_sizeof(llty)
gendesc = self.context.get_generator_desc(typ)
# This is the PyCFunctionWithKeywords generated by PyCallWrapper
genfnty = Type.function(self.pyobj, [self.pyobj, self.pyobj, self.pyobj])
genfn = self._get_function(genfnty, name=gendesc.llvm_cpython_wrapper_name)
# This is the raw finalizer generated by _lower_generator_finalize_func()
finalizerty = Type.function(Type.void(), [self.voidptr])
if typ.has_finalizer:
finalizer = self._get_function(finalizerty, name=gendesc.llvm_finalizer_name)
else:
finalizer = Constant.null(Type.pointer(finalizerty))
# PyObject *numba_make_generator(state_size, initial_state, nextfunc, finalizer, env)
fnty = Type.function(self.pyobj, [self.py_ssize_t,
self.voidptr,
Type.pointer(genfnty),
Type.pointer(finalizerty),
self.voidptr])
fn = self._get_function(fnty, name="numba_make_generator")
state_size = ir.Constant(self.py_ssize_t, gen_struct_size)
initial_state = self.builder.bitcast(val, self.voidptr)
if env is None:
env = self.get_null_object()
env = self.builder.bitcast(env, self.voidptr)
return self.builder.call(fn,
(state_size, initial_state, genfn, finalizer, env))
def numba_array_adaptor(self, ary, ptr):
assert not self.context.enable_nrt
fnty = Type.function(Type.int(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="numba_adapt_ndarray")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (ary, ptr))
def numba_buffer_adaptor(self, buf, ptr):
fnty = Type.function(Type.void(),
[ir.PointerType(self.py_buffer_t), self.voidptr])
fn = self._get_function(fnty, name="numba_adapt_buffer")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (buf, ptr))
def complex_adaptor(self, cobj, cmplx):
fnty = Type.function(Type.int(), [self.pyobj, cmplx.type])
fn = self._get_function(fnty, name="numba_complex_adaptor")
return self.builder.call(fn, [cobj, cmplx])
def extract_record_data(self, obj, pbuf):
fnty = Type.function(self.voidptr,
[self.pyobj, ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_extract_record_data")
return self.builder.call(fn, [obj, pbuf])
def get_buffer(self, obj, pbuf):
fnty = Type.function(Type.int(),
[self.pyobj, ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_get_buffer")
return self.builder.call(fn, [obj, pbuf])
def release_buffer(self, pbuf):
fnty = Type.function(Type.void(), [ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_release_buffer")
return self.builder.call(fn, [pbuf])
def extract_np_datetime(self, obj):
fnty = Type.function(Type.int(64), [self.pyobj])
fn = self._get_function(fnty, name="numba_extract_np_datetime")
return self.builder.call(fn, [obj])
def extract_np_timedelta(self, obj):
fnty = Type.function(Type.int(64), [self.pyobj])
fn = self._get_function(fnty, name="numba_extract_np_timedelta")
return self.builder.call(fn, [obj])
def create_np_datetime(self, val, unit_code):
unit_code = Constant.int(Type.int(), unit_code)
fnty = Type.function(self.pyobj, [Type.int(64), Type.int()])
fn = self._get_function(fnty, name="numba_create_np_datetime")
return self.builder.call(fn, [val, unit_code])
def create_np_timedelta(self, val, unit_code):
unit_code = Constant.int(Type.int(), unit_code)
fnty = Type.function(self.pyobj, [Type.int(64), Type.int()])
fn = self._get_function(fnty, name="numba_create_np_timedelta")
return self.builder.call(fn, [val, unit_code])
def recreate_record(self, pdata, size, dtype, env_manager):
fnty = Type.function(self.pyobj, [Type.pointer(Type.int(8)),
Type.int(), self.pyobj])
fn = self._get_function(fnty, name="numba_recreate_record")
dtypeaddr = env_manager.read_const(env_manager.add_const(dtype))
return self.builder.call(fn, [pdata, size, dtypeaddr])
def string_from_constant_string(self, string):
cstr = self.context.insert_const_string(self.module, string)
sz = self.context.get_constant(types.intp, len(string))
return self.string_from_string_and_size(cstr, sz)
def call_jit_code(self, func, sig, args):
"""Calls into Numba jitted code and propagate error using the Python
calling convention.
Parameters
----------
func : function
The Python function to be compiled. This function is compiled
in nopython-mode.
sig : numba.typing.Signature
The function signature for *func*.
args : Sequence[llvmlite.binding.Value]
LLVM values to use as arguments.
Returns
-------
(is_error, res) : 2-tuple of llvmlite.binding.Value.
is_error : true iff *func* raised an exception.
res : Returned value from *func* iff *is_error* is false.
If *is_error* is true, this method will adapt the nopython exception
into a Python exception. Caller should return NULL to Python to
indicate an error.
"""
# Compile *func*
builder = self.builder
cres = self.context.compile_subroutine(builder, func, sig)
got_retty = cres.signature.return_type
retty = sig.return_type
if got_retty != retty:
# This error indicates an error in *func* or the caller of this
# method.
raise errors.LoweringError(
f'mismatching signature {got_retty} != {retty}.\n'
)
# Call into *func*
status, res = self.context.call_internal_no_propagate(
builder, cres.fndesc, sig, args,
)
# Post-call handling for *func*
is_error_ptr = cgutils.alloca_once(builder, cgutils.bool_t, zfill=True)
res_type = self.context.get_value_type(sig.return_type)
res_ptr = cgutils.alloca_once(builder, res_type, zfill=True)
# Handle error and adapt the nopython exception into cpython exception
with builder.if_else(status.is_error) as (has_err, no_err):
with has_err:
builder.store(status.is_error, is_error_ptr)
# Set error state in the Python interpreter
self.context.call_conv.raise_error(builder, self, status)
with no_err:
# Handle returned value
res = imputils.fix_returning_optional(
self.context, builder, sig, status, res,
)
builder.store(res, res_ptr)
is_error = builder.load(is_error_ptr)
res = builder.load(res_ptr)
return is_error, res
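    # A minimal usage sketch (hypothetical helper names): inside a boxing or
    # unboxing routine, call a nopython-compiled helper and let the Python
    # exception state propagate on failure:
    #
    #     is_error, res = c.pyapi.call_jit_code(my_helper, sig, [arg])
    #     with c.builder.if_then(is_error):
    #         ...  # return NULL to the interpreter; the exception is set
    #
    # When is_error is true the nopython exception has already been adapted
    # into a live Python exception, so the caller only needs to return NULL.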
class ObjModeUtils:
"""Internal utils for calling objmode dispatcher from within NPM code.
"""
def __init__(self, pyapi):
self.pyapi = pyapi
def load_dispatcher(self, fnty, argtypes):
builder = self.pyapi.builder
tyctx = self.pyapi.context
m = builder.module
# Add a global variable to cache the objmode dispatcher
gv = ir.GlobalVariable(
m, self.pyapi.pyobj,
name=m.get_unique_name("cached_objmode_dispatcher"),
)
gv.initializer = gv.type.pointee(None)
gv.linkage = 'internal'
cached = builder.load(gv)
with builder.if_then(cgutils.is_null(builder, cached)):
if serialize.is_serialiable(fnty.dispatcher):
cls = type(self)
compiler = self.pyapi.unserialize(
self.pyapi.serialize_object(cls._call_objmode_dispatcher)
)
serialized_dispatcher = self.pyapi.serialize_object(
(fnty.dispatcher, tuple(argtypes)),
)
compile_args = self.pyapi.unserialize(serialized_dispatcher)
callee = self.pyapi.call_function_objargs(
compiler, [compile_args],
)
# Clean up
self.pyapi.decref(compiler)
self.pyapi.decref(compile_args)
else:
entry_pt = fnty.dispatcher.compile(tuple(argtypes))
callee = tyctx.add_dynamic_addr(
builder, id(entry_pt), info="with_objectmode",
)
# Incref the dispatcher and cache it
self.pyapi.incref(callee)
builder.store(callee, gv)
callee = builder.load(gv)
return callee
@staticmethod
def _call_objmode_dispatcher(compile_args):
dispatcher, argtypes = compile_args
entrypt = dispatcher.compile(argtypes)
return entrypt
|
get_package_list
|
Get a list of all the package names.
Parameters
----------
ftype : str
Type of package, 'RIV', 'LPF', etc.
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
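Examples
--------
A minimal usage sketch (the namefile is illustrative):

>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.get_package_list()  # e.g. ['DIS', 'BAS6', 'LPF', 'WEL']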
|
"""
mbase module
This module contains the base model class from which
all of the other models inherit.
"""
import abc
import os
import shutil
import threading
import warnings
import queue as Queue
from datetime import datetime
from shutil import which
from subprocess import Popen, PIPE, STDOUT
import copy
import numpy as np
from flopy import utils, discretization
from .version import __version__
from .discretization.grid import Grid
## Global variables
# Multiplier for individual array elements in integer and real arrays read by
# MODFLOW's U2DREL, U1DREL and U2DINT.
iconst = 1
# Printout flag. If >= 0 then array values read are printed in listing file.
iprn = -1
# external exceptions for users
class PackageLoadException(Exception):
"""
FloPy package load exception.
"""
def __init__(self, error, location=""):
"""Initialize exception."""
self.message = error
super().__init__(f"{error} ({location})")
class FileDataEntry:
def __init__(self, fname, unit, binflag=False, output=False, package=None):
self.fname = fname
self.unit = unit
self.binflag = binflag
self.output = output
self.package = package
class FileData:
def __init__(self):
self.file_data = []
return
def add_file(self, fname, unit, binflag=False, output=False, package=None):
ipop = []
for idx, file_data in enumerate(self.file_data):
if file_data.fname == fname or file_data.unit == unit:
                ipop.append(idx)
        # drop any existing entries that clash on file name or unit number
        for idx in reversed(ipop):
            self.file_data.pop(idx)
        self.file_data.append(
FileDataEntry(
fname, unit, binflag=binflag, output=output, package=package
)
)
return
class ModelInterface:
def __init__(self):
self._mg_resync = True
self._modelgrid = None
def update_modelgrid(self):
if self._modelgrid is not None:
self._modelgrid = Grid(
proj4=self._modelgrid.proj4,
xoff=self._modelgrid.xoffset,
yoff=self._modelgrid.yoffset,
angrot=self._modelgrid.angrot,
)
self._mg_resync = True
@property
@abc.abstractmethod
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
@abc.abstractmethod
def packagelist(self):
raise NotImplementedError(
"must define packagelist in child class to use this base class"
)
@property
@abc.abstractmethod
def namefile(self):
raise NotImplementedError(
"must define namefile in child class to use this base class"
)
@property
@abc.abstractmethod
def model_ws(self):
raise NotImplementedError(
"must define model_ws in child class to use this base class"
)
@property
@abc.abstractmethod
def exename(self):
raise NotImplementedError(
"must define exename in child class to use this base class"
)
@property
@abc.abstractmethod
def version(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@property
@abc.abstractmethod
def solver_tols(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@abc.abstractmethod
def export(self, f, **kwargs):
raise NotImplementedError(
"must define export in child class to use this base class"
)
@property
@abc.abstractmethod
def laytyp(self):
raise NotImplementedError(
"must define laytyp in child class to use this base class"
)
@property
@abc.abstractmethod
def hdry(self):
raise NotImplementedError(
"must define hdry in child class to use this base class"
)
@property
@abc.abstractmethod
def hnoflo(self):
raise NotImplementedError(
"must define hnoflo in child class to use this base class"
)
@property
@abc.abstractmethod
def laycbd(self):
raise NotImplementedError(
"must define laycbd in child class to use this base class"
)
@property
@abc.abstractmethod
def verbose(self):
raise NotImplementedError(
"must define verbose in child class to use this base class"
)
@abc.abstractmethod
def check(self, f=None, verbose=True, level=1):
raise NotImplementedError(
"must define check in child class to use this base class"
)
# MASKED: get_package_list function (lines 182-204)
def _check(self, chk, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
summarize : bool
Boolean flag used to determine if summary of results is written
to the screen
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
results = {}
for p in self.packagelist:
if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
results[p.name[0]] = p.check(
f=None,
verbose=False,
level=level - 1,
checktype=chk.__class__,
)
# model level checks
# solver check
if self.version in chk.solver_packages.keys():
solvers = set(chk.solver_packages[self.version]).intersection(
set(self.get_package_list())
)
if not solvers:
chk._add_to_summary(
"Error", desc="\r No solver package", package="model"
)
elif len(list(solvers)) > 1:
for s in solvers:
chk._add_to_summary(
"Error",
desc="\r Multiple solver packages",
package=s,
)
else:
chk.passed.append("Compatible solver package")
# add package check results to model level check summary
for r in results.values():
if (
r is not None and r.summary_array is not None
): # currently SFR doesn't have one
chk.summary_array = np.append(
chk.summary_array, r.summary_array
).view(np.recarray)
chk.passed += [
f"{r.package.name[0]} package: {psd}" for psd in r.passed
]
chk.summarize()
return chk
class BaseModel(ModelInterface):
"""
MODFLOW-based models base class.
Parameters
----------
modelname : str, default "modflowtest"
Name of the model, which is also used for model file names.
namefile_ext : str, default "nam"
Name file extension, without "."
exe_name : str, default "mf2k.exe"
Name of the modflow executable.
model_ws : str, optional
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
structured : bool, default True
Specify if model grid is structured (default) or unstructured.
verbose : bool, default False
Print additional information to the screen.
**kwargs : dict, optional
Used to define: ``xll``/``yll`` for the x- and y-coordinates of
the lower-left corner of the grid, ``xul``/``yul`` for the
x- and y-coordinates of the upper-left corner of the grid
(deprecated), ``rotation`` for the grid rotation (default 0.0),
``proj4_str`` for a PROJ string, and ``start_datetime`` for
model start date (default "1-1-1970").
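
    Examples
    --------
    BaseModel is not instantiated directly; concrete models pass these
    arguments through. A minimal sketch (names are illustrative):

    >>> import flopy
    >>> m = flopy.modflow.Modflow(modelname="demo", model_ws="./demo_ws")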
"""
def __init__(
self,
modelname="modflowtest",
namefile_ext="nam",
exe_name="mf2k.exe",
model_ws=None,
structured=True,
verbose=False,
**kwargs,
):
"""Initialize BaseModel."""
super().__init__()
self.__name = modelname
self.namefile_ext = namefile_ext or ""
self._namefile = self.__name + "." + self.namefile_ext
self._packagelist = []
self.heading = ""
self.exe_name = exe_name
self._verbose = verbose
self.external_path = None
self.external_extension = "ref"
if model_ws is None:
model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
except:
print(
f"\n{model_ws} not valid, "
f"workspace-folder was changed to {os.getcwd()}\n"
)
model_ws = os.getcwd()
self._model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ""
# check for reference info in kwargs
# we are just carrying these until a dis package is added
xll = kwargs.pop("xll", None)
yll = kwargs.pop("yll", None)
self._xul = kwargs.pop("xul", None)
self._yul = kwargs.pop("yul", None)
self._rotation = kwargs.pop("rotation", 0.0)
self._proj4_str = kwargs.pop("proj4_str", None)
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
# build model discretization objects
self._modelgrid = Grid(
proj4=self._proj4_str,
xoff=xll,
yoff=yll,
angrot=self._rotation,
)
self._modeltime = None
# Model file information
self.__onunit__ = 10
# external option stuff
self.array_free_format = True
self.free_format_input = True
self.parameter_load = False
self.array_format = None
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external_output = []
self.package_units = []
self._next_ext_unit = None
# output files
self.output_fnames = []
self.output_units = []
self.output_binflag = []
self.output_packages = []
return
@property
def modeltime(self):
raise NotImplementedError(
"must define modeltime in child class to use this base class"
)
@property
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
def packagelist(self):
return self._packagelist
@packagelist.setter
def packagelist(self, packagelist):
self._packagelist = packagelist
@property
def namefile(self):
return self._namefile
@namefile.setter
def namefile(self, namefile):
self._namefile = namefile
@property
def model_ws(self):
return self._model_ws
@model_ws.setter
def model_ws(self, model_ws):
self._model_ws = model_ws
@property
def exename(self):
return self._exename
@exename.setter
def exename(self, exename):
self._exename = exename
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, verbose):
self._verbose = verbose
@property
def laytyp(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").laytyp.array
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").laycon.array
if self.get_package("UPW") is not None:
return self.get_package("UPW").laytyp.array
return None
@property
def hdry(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").hdry
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").hdry
if self.get_package("UPW") is not None:
return self.get_package("UPW").hdry
return None
@property
def hnoflo(self):
try:
bas6 = self.get_package("BAS6")
return bas6.hnoflo
except AttributeError:
return None
@property
def laycbd(self):
try:
dis = self.get_package("DIS")
return dis.laycbd.array
except AttributeError:
return None
# we don't need these - no need for controlled access to array_free_format
# def set_free_format(self, value=True):
# """
# Set the free format flag for the model instance
#
# Parameters
# ----------
# value : bool
# Boolean value to set free format flag for model. (default is True)
#
# Returns
# -------
#
# """
# if not isinstance(value, bool):
# print('Error: set_free_format passed value must be a boolean')
# return False
# self.array_free_format = value
#
# def get_free_format(self):
# """
# Return the free format flag for the model
#
# Returns
# -------
# out : bool
# Free format flag for the model
#
# """
# return self.array_free_format
def next_unit(self, i=None):
if i is not None:
self.__onunit__ = i - 1
else:
self.__onunit__ += 1
return self.__onunit__
def next_ext_unit(self):
"""
Function to encapsulate next_ext_unit attribute
"""
next_unit = self._next_ext_unit + 1
self._next_ext_unit += 1
return next_unit
def export(self, f, **kwargs):
"""
Method to export a model to netcdf or shapefile based on the
extension of the file name (.shp for shapefile, .nc for netcdf)
Parameters
----------
f : str
filename
kwargs : keyword arguments
modelgrid : flopy.discretization.Grid instance
user supplied modelgrid which can be used for exporting
in lieu of the modelgrid associated with the model object
Returns
-------
None or Netcdf object
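
        Examples
        --------
        A minimal sketch (file names are illustrative); the export format
        follows the file extension:

        >>> m.export('model.shp')  # shapefile
        >>> m.export('model.nc')   # NetCDF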
"""
from .export import utils
return utils.model_export(f, self, **kwargs)
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
"""
for idx, u in enumerate(p.unit_number):
if u != 0:
if u in self.package_units or u in self.external_units:
try:
pn = p.name[idx]
except:
pn = p.name
if self.verbose:
print(
f"\nWARNING:\n unit {u} of package {pn} already in use."
)
self.package_units.append(u)
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif isinstance(p, type(pp)):
if self.verbose:
print(
"\nWARNING:\n Two packages of the same type, "
f"Replacing existing '{p.name[0]}' package."
)
self.packagelist[i] = p
return
if self.verbose:
print("adding Package: ", p.name[0])
self.packagelist.append(p)
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
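
        Examples
        --------
        A minimal sketch (assumes the model has a RIV package):

        >>> m.remove_package('RIV')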
"""
for i, pp in enumerate(self.packagelist):
if pname.upper() in pp.name:
if self.verbose:
print("removing Package: ", pp.name)
# Remove the package object from the model's packagelist
p = self.packagelist.pop(i)
# Remove the package unit number from the list of package
# units stored with the model
for iu in p.unit_number:
if iu in self.package_units:
self.package_units.remove(iu)
return
raise StopIteration(
"Package name " + pname + " not found in Package list"
)
def __getattr__(self, item):
"""
__getattr__ - syntactic sugar
Parameters
----------
item : str
3 character package name (case insensitive) or "sr" to access
the SpatialReference instance of the ModflowDis object
Returns
-------
sr : SpatialReference instance
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
Note
----
if self.dis is not None, then the spatial reference instance is updated
using self.dis.delr, self.dis.delc, and self.dis.lenuni before being
returned
"""
if item == "output_packages" or not hasattr(self, "output_packages"):
raise AttributeError(item)
if item == "tr":
if self.dis is not None:
return self.dis.tr
else:
return None
if item == "nper":
if self.dis is not None:
return self.dis.nper
else:
return 0
if item == "start_datetime":
if self.dis is not None:
return self.dis.start_datetime
else:
return None
# return self.get_package(item)
# to avoid infinite recursion
if item == "_packagelist" or item == "packagelist":
raise AttributeError(item)
pckg = self.get_package(item)
if pckg is not None or item in self.mfnam_packages:
return pckg
if item == "modelgrid":
return
raise AttributeError(item)
def get_ext_dict_attr(
self, ext_unit_dict=None, unit=None, filetype=None, pop_key=True
):
iu = None
fname = None
if ext_unit_dict is not None:
for key, value in ext_unit_dict.items():
if key == unit:
iu = key
fname = os.path.basename(value.filename)
break
elif value.filetype == filetype:
iu = key
fname = os.path.basename(value.filename)
if pop_key:
self.add_pop_key_list(iu)
break
return iu, fname
def _output_msg(self, i, add=True):
if add:
txt1 = "Adding"
txt2 = "to"
else:
txt1 = "Removing"
txt2 = "from"
print(
f"{txt1} {self.output_fnames[i]} (unit={self.output_units[i]}) "
f"{txt2} the output list."
)
def add_output_file(
self, unit, fname=None, extension="cbc", binflag=True, package=None
):
"""
        Add an ASCII or binary output file for a package
Parameters
----------
unit : int
unit number of external array
fname : str
filename of external array. (default is None)
extension : str
extension to use for the cell-by-cell file. Only used if fname
is None. (default is cbc)
binflag : bool
boolean flag indicating if the output file is a binary file.
Default is True
package : str
string that defines the package the output file is attached to.
Default is None
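
        Examples
        --------
        A minimal sketch (the unit number and package name are illustrative):

        >>> m.add_output_file(53, extension="cbc", package="LPF")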
"""
add_cbc = False
if unit > 0:
add_cbc = True
# determine if the file is in external_units
if abs(unit) in self.external_units:
idx = self.external_units.index(abs(unit))
if fname is None:
fname = os.path.basename(self.external_fnames[idx])
binflag = self.external_binflag[idx]
self.remove_external(unit=abs(unit))
# determine if the unit exists in the output data
if abs(unit) in self.output_units:
add_cbc = False
idx = self.output_units.index(abs(unit))
# determine if binflag has changed
if binflag is not self.output_binflag[idx]:
add_cbc = True
if add_cbc:
self.remove_output(unit=abs(unit))
else:
if package is not None:
self.output_packages[idx].append(package)
if add_cbc:
if fname is None:
fname = f"{self.name}.{extension}"
# check if this file name exists for a different unit number
if fname in self.output_fnames:
idx = self.output_fnames.index(fname)
iut = self.output_units[idx]
if iut != unit:
# include unit number in fname if package has
# not been passed
if package is None:
fname = f"{self.name}.{unit}.{extension}"
# include package name in fname
else:
fname = f"{self.name}.{package}.{extension}"
else:
fname = os.path.basename(fname)
self.add_output(fname, unit, binflag=binflag, package=package)
return
def add_output(self, fname, unit, binflag=False, package=None):
"""
        Add an output file so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
            filename of the output file
        unit : int
            unit number of the output file
binflag : boolean
binary or not. (default is False)
"""
if fname in self.output_fnames:
if self.verbose:
print(
"BaseModel.add_output() warning: "
f"replacing existing filename {fname}"
)
idx = self.output_fnames.index(fname)
if self.verbose:
self._output_msg(idx, add=False)
self.output_fnames.pop(idx)
self.output_units.pop(idx)
self.output_binflag.pop(idx)
self.output_packages.pop(idx)
self.output_fnames.append(fname)
self.output_units.append(unit)
self.output_binflag.append(binflag)
if package is not None:
self.output_packages.append([package])
else:
self.output_packages.append([])
if self.verbose:
self._output_msg(-1, add=True)
return
def remove_output(self, fname=None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
else:
msg = " either fname or unit must be passed to remove_output()"
raise Exception(msg)
return
def get_output(self, fname=None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
return self.output_units[i]
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
return self.output_fnames[i]
return None
else:
msg = " either fname or unit must be passed to get_output()"
raise Exception(msg)
return
def set_output_attribute(self, fname=None, unit=None, attr=None):
"""
Set a variable in an output file from the model by specifying either
the file name or the unit number and a dictionary with attributes
to change.
Parameters
----------
fname : str
filename of output array
unit : int
            unit number of output array
        attr : dict
            dictionary of attributes to change; recognized keys are
            "binflag", "fname", and "unit"
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
msg = (
" either fname or unit must be passed "
"to set_output_attribute()"
)
raise Exception(msg)
if attr is not None:
if idx is not None:
                for key, value in attr.items():
if key == "binflag":
self.output_binflag[idx] = value
elif key == "fname":
self.output_fnames[idx] = value
elif key == "unit":
self.output_units[idx] = value
return
def get_output_attribute(self, fname=None, unit=None, attr=None):
"""
Get a attribute for an output file from the model by specifying either
the file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
            unit number of output array
        attr : str
            name of the attribute to return; one of "binflag", "fname",
            or "unit"
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
" either fname or unit must be passed "
"to set_output_attribute()"
)
v = None
if attr is not None:
if idx is not None:
if attr == "binflag":
v = self.output_binflag[idx]
elif attr == "fname":
v = self.output_fnames[idx]
elif attr == "unit":
v = self.output_units[idx]
return v
def add_external(self, fname, unit, binflag=False, output=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
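
        Examples
        --------
        A minimal sketch (the file name and unit number are illustrative):

        >>> m.add_external('recharge.ref', 150)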
"""
if fname in self.external_fnames:
if self.verbose:
print(
"BaseModel.add_external() warning: "
f"replacing existing filename {fname}"
)
idx = self.external_fnames.index(fname)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
if unit in self.external_units:
if self.verbose:
msg = f"BaseModel.add_external() warning: replacing existing unit {unit}"
print(msg)
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
self.external_output.append(output)
return
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
"""
plist = []
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
plist.append(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
plist.append(i)
else:
msg = " either fname or unit must be passed to remove_external()"
raise Exception(msg)
# remove external file
j = 0
for i in plist:
ipos = i - j
self.external_fnames.pop(ipos)
self.external_units.pop(ipos)
self.external_binflag.pop(ipos)
self.external_output.pop(ipos)
j += 1
return
def add_existing_package(
self, filename, ptype=None, copy_to_model_ws=True
):
"""
Add an existing package to a model instance.
Parameters
----------
filename : str
the name of the file to add as a package
ptype : optional
the model package type (e.g. "lpf", "wel", etc). If None,
then the file extension of the filename arg is used
copy_to_model_ws : bool
flag to copy the package file into the model_ws directory.
Returns
-------
None
"""
if ptype is None:
ptype = filename.split(".")[-1]
ptype = str(ptype).upper()
# for pak in self.packagelist:
# if ptype in pak.name:
# print("BaseModel.add_existing_package() warning: " +\
# "replacing existing package {0}".format(ptype))
class Obj:
pass
fake_package = Obj()
fake_package.write_file = lambda: None
fake_package.name = [ptype]
fake_package.extension = [filename.split(".")[-1]]
fake_package.unit_number = [self.next_ext_unit()]
if copy_to_model_ws:
base_filename = os.path.split(filename)[-1]
fake_package.file_name = [base_filename]
shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
else:
fake_package.file_name = [filename]
fake_package.allowDuplicates = True
self.add_package(fake_package)
def get_name_file_entries(self):
"""
Get a string representation of the name file.
"""
lines = []
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s = f"{p.name[i]:14s} {p.unit_number[i]:5d} {p.file_name[i]}"
lines.append(s)
return "\n".join(lines) + "\n"
def has_package(self, name):
"""
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
True if package name exists, otherwise False if not found.
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for p in self.packagelist:
for pn in p.name:
if pn.upper() == name:
return True
return False
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
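
        Examples
        --------
        A minimal sketch (assumes an LPF package is present):

        >>> lpf = m.get_package('LPF')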
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for pp in self.packagelist:
if pp.name[0].upper() == name:
return pp
return None
def set_version(self, version):
self.version = version.lower()
# check that this is a valid model version
if self.version not in list(self.version_types.keys()):
err = (
f"Error: Unsupported model version ({self.version}). "
"Valid model versions are:"
)
for v in list(self.version_types.keys()):
err += f" {v}"
raise Exception(err)
# set namefile heading
self.heading = (
f"# Name file for {self.version_types[self.version]}, "
f"generated by Flopy version {__version__}."
)
# set heading for each package
for p in self.get_package_list():
pak = self.get_package(p)
if hasattr(pak, "heading"):
pak._generate_heading()
return None
def change_model_ws(self, new_pth=None, reset_external=False):
"""
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
the present working directory).
Returns
-------
        None
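
        Examples
        --------
        A minimal sketch (the path is illustrative):

        >>> m.change_model_ws('./new_workspace')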
"""
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
print(f"\ncreating model workspace...\n {new_pth}")
os.makedirs(new_pth)
except:
raise OSError(f"{new_pth} not valid, workspace-folder")
# line = '\n{} not valid, workspace-folder '.format(new_pth) + \
# 'was changed to {}\n'.format(os.getcwd())
# print(line)
# new_pth = os.getcwd()
# --reset the model workspace
old_pth = self._model_ws
self._model_ws = new_pth
if self.verbose:
print(f"\nchanging model workspace...\n {new_pth}")
# reset the paths for each package
for pp in self.packagelist:
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
# create the external path (if needed)
if (
hasattr(self, "external_path")
and self.external_path is not None
and not os.path.exists(
os.path.join(self._model_ws, self.external_path)
)
):
pth = os.path.join(self._model_ws, self.external_path)
os.makedirs(pth)
if reset_external:
self._reset_external(pth, old_pth)
elif reset_external:
self._reset_external(self._model_ws, old_pth)
return None
def _reset_external(self, pth, old_pth):
new_ext_fnames = []
for ext_file, output in zip(
self.external_fnames, self.external_output
):
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
# this is a wicked mess
if output:
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
new_ext_file = ext_file
else:
# fpth = os.path.abspath(os.path.join(old_pth, ext_file))
# new_ext_file = os.path.relpath(fpth, os.path.abspath(pth))
fdir = os.path.dirname(ext_file)
if fdir == "":
fpth = os.path.abspath(os.path.join(old_pth, ext_file))
else:
fpth = ext_file
ao = os.path.abspath(os.path.dirname(fpth))
ep = os.path.abspath(pth)
relp = os.path.relpath(ao, ep)
new_ext_file = os.path.join(relp, os.path.basename(ext_file))
new_ext_fnames.append(new_ext_file)
self.external_fnames = new_ext_fnames
@property
def model_ws(self):
return copy.deepcopy(self._model_ws)
def _set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = str(value)
self.namefile = self.__name + "." + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + "." + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
def __setattr__(self, key, value):
if key == "free_format_input":
# if self.bas6 is not None:
# self.bas6.ifrefm = value
super().__setattr__(key, value)
elif key == "name":
self._set_name(value)
elif key == "model_ws":
self.change_model_ws(value)
elif key == "sr" and value.__class__.__name__ == "SpatialReference":
warnings.warn(
"SpatialReference has been deprecated.",
category=DeprecationWarning,
)
if self.dis is not None:
self.dis.sr = value
else:
raise Exception(
"cannot set SpatialReference - ModflowDis not found"
)
elif key == "tr":
assert isinstance(
value, discretization.reference.TemporalReference
)
if self.dis is not None:
self.dis.tr = value
else:
raise Exception(
"cannot set TemporalReference - ModflowDis not found"
)
elif key == "start_datetime":
if self.dis is not None:
self.dis.start_datetime = value
self.tr.start_datetime = value
else:
raise Exception(
"cannot set start_datetime - ModflowDis not found"
)
else:
super().__setattr__(key, value)
def run_model(
self,
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
):
"""
This method will run the model using subprocess.Popen.
Parameters
----------
silent : boolean
            If True, suppress echoing run information to the screen
            (default is False).
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
by the method . (default is False).
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
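
        Examples
        --------
        A minimal sketch (assumes the model's executable is on the path):

        >>> success, buff = m.run_model(silent=True, report=True)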
"""
return run_model(
self.exe_name,
self.namefile,
model_ws=self.model_ws,
silent=silent,
pause=pause,
report=report,
normal_msg=normal_msg,
)
def load_results(self):
print("load_results not implemented")
return None
def write_input(self, SelPackList=False, check=False):
"""
Write the input.
Parameters
----------
SelPackList : False or list of packages
"""
if check:
# run check prior to writing input
self.check(f=f"{self.name}.chk", verbose=self.verbose, level=1)
# reset the model to free_format if parameter substitution was
# performed on a model load
if self.parameter_load and not self.free_format_input:
if self.verbose:
print(
"\nResetting free_format_input to True to "
"preserve the precision of the parameter data."
)
self.free_format_input = True
if self.verbose:
print("\nWriting packages:")
if SelPackList == False:
for p in self.packagelist:
if self.verbose:
print(" Package: ", p.name[0])
# prevent individual package checks from running after
# model-level package check above
# otherwise checks are run twice
# or the model level check procedure would have to be split up
# or each package would need a check argument,
# or default for package level check would have to be False
try:
p.write_file(check=False)
except TypeError:
p.write_file()
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(" Package: ", p.name[0])
try:
p.write_file(check=False)
except TypeError:
p.write_file()
break
if self.verbose:
print(" ")
# write name file
self.write_name_file()
# os.chdir(org_dir)
return
def write_name_file(self):
"""
        Every model needs its own write_name_file function
        """
        raise Exception(
            "IMPLEMENTATION ERROR: write_name_file must be overloaded"
)
def set_model_units(self):
"""
Every model needs its own set_model_units method
"""
raise Exception(
"IMPLEMENTATION ERROR: set_model_units must be overloaded"
)
@property
def name(self):
"""
Get model name
Returns
-------
name : str
name of model
"""
return copy.deepcopy(self.__name)
def add_pop_key_list(self, key):
"""
        Add an external file unit number to a list that will be used to remove
model output (typically binary) files from ext_unit_dict.
Parameters
----------
key : int
file unit number
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
chk = utils.check(self, f=f, verbose=verbose, level=level)
# check for unit number conflicts
package_units = {}
duplicate_units = {}
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] != 0:
if p.unit_number[i] in package_units.values():
duplicate_units[p.name[i]] = p.unit_number[i]
otherpackage = [
k
for k, v in package_units.items()
if v == p.unit_number[i]
][0]
duplicate_units[otherpackage] = p.unit_number[i]
if len(duplicate_units) > 0:
for k, v in duplicate_units.items():
chk._add_to_summary(
"Error", package=k, value=v, desc="unit number conflict"
)
else:
chk.passed.append("Unit number conflicts")
return self._check(chk, level)
def plot(self, SelPackList=None, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
model input data
Parameters
----------
SelPackList : bool or list
            List of packages to plot. If SelPackList=None all packages
are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
                MODFLOW zero-based layer number to return. If None, then
                all layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return.
(default is zero)
key : str
MfList dictionary key. (default is None)
Returns
        -------
axes : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis are returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_model_helper(
self, SelPackList=SelPackList, **kwargs
)
return axes
def to_shapefile(self, filename, package_names=None, **kwargs):
"""
Wrapper function for writing a shapefile for the model grid. If
package_names is not None, then search through the requested packages
looking for arrays that can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
        >>> m.to_shapefile('model.shp', package_names=['dis', 'lpf'])
"""
warnings.warn("to_shapefile() is deprecated. use .export()")
self.export(filename, package_names=package_names)
return
def run_model(
exe_name,
namefile,
model_ws="./",
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
use_async=False,
cargs=None,
):
"""
This function will run the model using subprocess.Popen. It
communicates with the model's stdout asynchronously and reports
progress to the screen with timestamps
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
        Namefile of model to run. The namefile must be the filename only,
        without the path. Namefile can be None
to allow programs that do not require a control file (name file)
to be passed as a command line argument.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
silent : boolean
        If True, suppress echoing run information to the screen
        (default is False).
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
by the method . (default is False).
normal_msg : str or list
Normal termination message used to determine if the
run terminated normally. More than one message can be provided using
a list. (Default is 'normal termination')
use_async : boolean
        Asynchronously read model stdout and report with timestamps. Good
        for models that take a long time to run; not recommended for models
        that run very quickly.
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
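
    Examples
    --------
    A minimal sketch (the executable and namefile names are illustrative):

    >>> success, buff = run_model('mf2005', 'model.nam',
    ...                           model_ws='./ws', report=True)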
"""
success = False
buff = []
# convert normal_msg to a list of lower case str for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg]
for idx, s in enumerate(normal_msg):
normal_msg[idx] = s.lower()
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
import platform
if platform.system() in "Windows":
if not exe_name.lower().endswith(".exe"):
exe = which(exe_name + ".exe")
elif exe_name.lower().endswith(".exe"):
exe = which(exe_name[:-4])
if exe is None:
raise Exception(
f"The program {exe_name} does not exist or is not executable."
)
else:
if not silent:
print(
f"FloPy is using the following executable to run the model: {exe}"
)
if namefile is not None:
if not os.path.isfile(os.path.join(model_ws, namefile)):
raise Exception(
f"The namefile for this model does not exists: {namefile}"
)
# simple little function for the thread to target
def q_output(output, q):
for line in iter(output.readline, b""):
q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe_name]
if namefile is not None:
argv.append(namefile)
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
if not use_async:
while True:
line = proc.stdout.readline().decode("utf-8")
if line == "" and proc.poll() is not None:
break
if line:
for msg in normal_msg:
if msg in line.lower():
success = True
break
line = line.rstrip("\r\n")
if not silent:
print(line)
if report:
buff.append(line)
else:
break
return success, buff
# some tricks for the async stdout reading
q = Queue.Queue()
thread = threading.Thread(target=q_output, args=(proc.stdout, q))
thread.daemon = True
thread.start()
failed_words = ["fail", "error"]
last = datetime.now()
lastsec = 0.0
while True:
try:
line = q.get_nowait()
except Queue.Empty:
pass
else:
if line == "":
break
line = line.decode().lower().strip()
if line != "":
now = datetime.now()
dt = now - last
tsecs = dt.total_seconds() - lastsec
line = f"(elapsed:{tsecs})-->{line}"
lastsec = tsecs + lastsec
buff.append(line)
if not silent:
print(line)
for fword in failed_words:
if fword in line:
success = False
break
if proc.poll() is not None:
break
proc.wait()
thread.join(timeout=1)
buff.extend(proc.stdout.readlines())
proc.stdout.close()
for line in buff:
for msg in normal_msg:
if msg in line.lower():
print("success")
success = True
break
if pause:
input("Press Enter to continue...")
return success, buff
|
def get_package_list(self, ftype=None):
"""
Get a list of all the package names.
Parameters
----------
ftype : str
Type of package, 'RIV', 'LPF', etc.
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
"""
val = []
for pp in self.packagelist:
if ftype is None:
val.append(pp.name[0].upper())
            elif pp.package_type.lower() == ftype.lower():
val.append(pp.name[0].upper())
return val
| 182
| 204
|
"""
mbase module
This module contains the base model class from which
all of the other models inherit.
"""
import abc
import os
import shutil
import threading
import warnings
import queue as Queue
from datetime import datetime
from shutil import which
from subprocess import Popen, PIPE, STDOUT
import copy
import numpy as np
from flopy import utils, discretization
from .version import __version__
from .discretization.grid import Grid
## Global variables
# Multiplier for individual array elements in integer and real arrays read by
# MODFLOW's U2DREL, U1DREL and U2DINT.
iconst = 1
# Printout flag. If >= 0 then array values read are printed in listing file.
iprn = -1
# external exceptions for users
class PackageLoadException(Exception):
"""
FloPy package load exception.
"""
def __init__(self, error, location=""):
"""Initialize exception."""
self.message = error
super().__init__(f"{error} ({location})")
class FileDataEntry:
def __init__(self, fname, unit, binflag=False, output=False, package=None):
self.fname = fname
self.unit = unit
self.binflag = binflag
self.output = output
self.package = package
class FileData:
def __init__(self):
self.file_data = []
return
def add_file(self, fname, unit, binflag=False, output=False, package=None):
ipop = []
for idx, file_data in enumerate(self.file_data):
if file_data.fname == fname or file_data.unit == unit:
                ipop.append(idx)
        # drop any existing entries that clash on file name or unit number
        for idx in reversed(ipop):
            self.file_data.pop(idx)
        self.file_data.append(
FileDataEntry(
fname, unit, binflag=binflag, output=output, package=package
)
)
return
class ModelInterface:
def __init__(self):
self._mg_resync = True
self._modelgrid = None
def update_modelgrid(self):
if self._modelgrid is not None:
self._modelgrid = Grid(
proj4=self._modelgrid.proj4,
xoff=self._modelgrid.xoffset,
yoff=self._modelgrid.yoffset,
angrot=self._modelgrid.angrot,
)
self._mg_resync = True
@property
@abc.abstractmethod
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
@abc.abstractmethod
def packagelist(self):
raise NotImplementedError(
"must define packagelist in child class to use this base class"
)
@property
@abc.abstractmethod
def namefile(self):
raise NotImplementedError(
"must define namefile in child class to use this base class"
)
@property
@abc.abstractmethod
def model_ws(self):
raise NotImplementedError(
"must define model_ws in child class to use this base class"
)
@property
@abc.abstractmethod
def exename(self):
raise NotImplementedError(
"must define exename in child class to use this base class"
)
@property
@abc.abstractmethod
def version(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@property
@abc.abstractmethod
def solver_tols(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@abc.abstractmethod
def export(self, f, **kwargs):
raise NotImplementedError(
"must define export in child class to use this base class"
)
@property
@abc.abstractmethod
def laytyp(self):
raise NotImplementedError(
"must define laytyp in child class to use this base class"
)
@property
@abc.abstractmethod
def hdry(self):
raise NotImplementedError(
"must define hdry in child class to use this base class"
)
@property
@abc.abstractmethod
def hnoflo(self):
raise NotImplementedError(
"must define hnoflo in child class to use this base class"
)
@property
@abc.abstractmethod
def laycbd(self):
raise NotImplementedError(
"must define laycbd in child class to use this base class"
)
@property
@abc.abstractmethod
def verbose(self):
raise NotImplementedError(
"must define verbose in child class to use this base class"
)
@abc.abstractmethod
def check(self, f=None, verbose=True, level=1):
raise NotImplementedError(
"must define check in child class to use this base class"
)
def get_package_list(self, ftype=None):
"""
Get a list of all the package names.
Parameters
----------
ftype : str
Type of package, 'RIV', 'LPF', etc.
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
"""
val = []
for pp in self.packagelist:
if ftype is None:
val.append(pp.name[0].upper())
            elif pp.package_type.lower() == ftype.lower():
val.append(pp.name[0].upper())
return val
def _check(self, chk, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
summarize : bool
Boolean flag used to determine if summary of results is written
to the screen
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
results = {}
for p in self.packagelist:
if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
results[p.name[0]] = p.check(
f=None,
verbose=False,
level=level - 1,
checktype=chk.__class__,
)
# model level checks
# solver check
if self.version in chk.solver_packages.keys():
solvers = set(chk.solver_packages[self.version]).intersection(
set(self.get_package_list())
)
if not solvers:
chk._add_to_summary(
"Error", desc="\r No solver package", package="model"
)
elif len(list(solvers)) > 1:
for s in solvers:
chk._add_to_summary(
"Error",
desc="\r Multiple solver packages",
package=s,
)
else:
chk.passed.append("Compatible solver package")
# add package check results to model level check summary
for r in results.values():
if (
r is not None and r.summary_array is not None
): # currently SFR doesn't have one
chk.summary_array = np.append(
chk.summary_array, r.summary_array
).view(np.recarray)
chk.passed += [
f"{r.package.name[0]} package: {psd}" for psd in r.passed
]
chk.summarize()
return chk
class BaseModel(ModelInterface):
"""
MODFLOW-based models base class.
Parameters
----------
modelname : str, default "modflowtest"
Name of the model, which is also used for model file names.
namefile_ext : str, default "nam"
Name file extension, without "."
exe_name : str, default "mf2k.exe"
Name of the modflow executable.
model_ws : str, optional
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
structured : bool, default True
Specify if model grid is structured (default) or unstructured.
verbose : bool, default False
Print additional information to the screen.
**kwargs : dict, optional
Used to define: ``xll``/``yll`` for the x- and y-coordinates of
the lower-left corner of the grid, ``xul``/``yul`` for the
x- and y-coordinates of the upper-left corner of the grid
(deprecated), ``rotation`` for the grid rotation (default 0.0),
``proj4_str`` for a PROJ string, and ``start_datetime`` for
model start date (default "1-1-1970").
"""
def __init__(
self,
modelname="modflowtest",
namefile_ext="nam",
exe_name="mf2k.exe",
model_ws=None,
structured=True,
verbose=False,
**kwargs,
):
"""Initialize BaseModel."""
super().__init__()
self.__name = modelname
self.namefile_ext = namefile_ext or ""
self._namefile = self.__name + "." + self.namefile_ext
self._packagelist = []
self.heading = ""
self.exe_name = exe_name
self._verbose = verbose
self.external_path = None
self.external_extension = "ref"
if model_ws is None:
model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
            except OSError:
                print(
                    f"\n{model_ws} is not a valid workspace; "
                    f"it was changed to {os.getcwd()}\n"
)
model_ws = os.getcwd()
self._model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ""
# check for reference info in kwargs
# we are just carrying these until a dis package is added
xll = kwargs.pop("xll", None)
yll = kwargs.pop("yll", None)
self._xul = kwargs.pop("xul", None)
self._yul = kwargs.pop("yul", None)
self._rotation = kwargs.pop("rotation", 0.0)
self._proj4_str = kwargs.pop("proj4_str", None)
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
# build model discretization objects
self._modelgrid = Grid(
proj4=self._proj4_str,
xoff=xll,
yoff=yll,
angrot=self._rotation,
)
self._modeltime = None
# Model file information
self.__onunit__ = 10
# external option stuff
self.array_free_format = True
self.free_format_input = True
self.parameter_load = False
self.array_format = None
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external_output = []
self.package_units = []
self._next_ext_unit = None
# output files
self.output_fnames = []
self.output_units = []
self.output_binflag = []
self.output_packages = []
return
@property
def modeltime(self):
raise NotImplementedError(
"must define modeltime in child class to use this base class"
)
@property
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
def packagelist(self):
return self._packagelist
@packagelist.setter
def packagelist(self, packagelist):
self._packagelist = packagelist
@property
def namefile(self):
return self._namefile
@namefile.setter
def namefile(self, namefile):
self._namefile = namefile
@property
def model_ws(self):
return self._model_ws
@model_ws.setter
def model_ws(self, model_ws):
self._model_ws = model_ws
@property
def exename(self):
return self._exename
@exename.setter
def exename(self, exename):
self._exename = exename
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, verbose):
self._verbose = verbose
@property
def laytyp(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").laytyp.array
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").laycon.array
if self.get_package("UPW") is not None:
return self.get_package("UPW").laytyp.array
return None
@property
def hdry(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").hdry
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").hdry
if self.get_package("UPW") is not None:
return self.get_package("UPW").hdry
return None
@property
def hnoflo(self):
try:
bas6 = self.get_package("BAS6")
return bas6.hnoflo
except AttributeError:
return None
@property
def laycbd(self):
try:
dis = self.get_package("DIS")
return dis.laycbd.array
except AttributeError:
return None
# we don't need these - no need for controlled access to array_free_format
# def set_free_format(self, value=True):
# """
# Set the free format flag for the model instance
#
# Parameters
# ----------
# value : bool
# Boolean value to set free format flag for model. (default is True)
#
# Returns
# -------
#
# """
# if not isinstance(value, bool):
# print('Error: set_free_format passed value must be a boolean')
# return False
# self.array_free_format = value
#
# def get_free_format(self):
# """
# Return the free format flag for the model
#
# Returns
# -------
# out : bool
# Free format flag for the model
#
# """
# return self.array_free_format
def next_unit(self, i=None):
if i is not None:
self.__onunit__ = i - 1
else:
self.__onunit__ += 1
return self.__onunit__
def next_ext_unit(self):
"""
Function to encapsulate next_ext_unit attribute
"""
next_unit = self._next_ext_unit + 1
self._next_ext_unit += 1
return next_unit
def export(self, f, **kwargs):
"""
Method to export a model to netcdf or shapefile based on the
extension of the file name (.shp for shapefile, .nc for netcdf)
Parameters
----------
f : str
filename
kwargs : keyword arguments
modelgrid : flopy.discretization.Grid instance
user supplied modelgrid which can be used for exporting
in lieu of the modelgrid associated with the model object
Returns
-------
None or Netcdf object
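        Examples
        --------
        A minimal sketch; the export format is inferred from the file
        extension, and ``model.nam`` is a hypothetical name file:

        >>> import flopy
        >>> m = flopy.modflow.Modflow.load('model.nam')
        >>> m.export('modelgrid.shp')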
"""
from .export import utils
return utils.model_export(f, self, **kwargs)
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
"""
for idx, u in enumerate(p.unit_number):
if u != 0:
if u in self.package_units or u in self.external_units:
try:
pn = p.name[idx]
except:
pn = p.name
if self.verbose:
print(
f"\nWARNING:\n unit {u} of package {pn} already in use."
)
self.package_units.append(u)
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif isinstance(p, type(pp)):
if self.verbose:
print(
"\nWARNING:\n Two packages of the same type, "
f"Replacing existing '{p.name[0]}' package."
)
self.packagelist[i] = p
return
if self.verbose:
print("adding Package: ", p.name[0])
self.packagelist.append(p)
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
"""
for i, pp in enumerate(self.packagelist):
if pname.upper() in pp.name:
if self.verbose:
print("removing Package: ", pp.name)
# Remove the package object from the model's packagelist
p = self.packagelist.pop(i)
# Remove the package unit number from the list of package
# units stored with the model
for iu in p.unit_number:
if iu in self.package_units:
self.package_units.remove(iu)
return
raise StopIteration(
"Package name " + pname + " not found in Package list"
)
def __getattr__(self, item):
"""
__getattr__ - syntactic sugar
Parameters
----------
item : str
3 character package name (case insensitive) or "sr" to access
the SpatialReference instance of the ModflowDis object
Returns
-------
sr : SpatialReference instance
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
Note
----
if self.dis is not None, then the spatial reference instance is updated
using self.dis.delr, self.dis.delc, and self.dis.lenuni before being
returned
"""
if item == "output_packages" or not hasattr(self, "output_packages"):
raise AttributeError(item)
if item == "tr":
if self.dis is not None:
return self.dis.tr
else:
return None
if item == "nper":
if self.dis is not None:
return self.dis.nper
else:
return 0
if item == "start_datetime":
if self.dis is not None:
return self.dis.start_datetime
else:
return None
# return self.get_package(item)
# to avoid infinite recursion
if item == "_packagelist" or item == "packagelist":
raise AttributeError(item)
pckg = self.get_package(item)
if pckg is not None or item in self.mfnam_packages:
return pckg
if item == "modelgrid":
return
raise AttributeError(item)
def get_ext_dict_attr(
self, ext_unit_dict=None, unit=None, filetype=None, pop_key=True
):
iu = None
fname = None
if ext_unit_dict is not None:
for key, value in ext_unit_dict.items():
if key == unit:
iu = key
fname = os.path.basename(value.filename)
break
elif value.filetype == filetype:
iu = key
fname = os.path.basename(value.filename)
if pop_key:
self.add_pop_key_list(iu)
break
return iu, fname
def _output_msg(self, i, add=True):
if add:
txt1 = "Adding"
txt2 = "to"
else:
txt1 = "Removing"
txt2 = "from"
print(
f"{txt1} {self.output_fnames[i]} (unit={self.output_units[i]}) "
f"{txt2} the output list."
)
def add_output_file(
self, unit, fname=None, extension="cbc", binflag=True, package=None
):
"""
Add an ascii or binary output file for a package
Parameters
----------
unit : int
unit number of external array
fname : str
filename of external array. (default is None)
extension : str
extension to use for the cell-by-cell file. Only used if fname
is None. (default is cbc)
binflag : bool
boolean flag indicating if the output file is a binary file.
Default is True
package : str
string that defines the package the output file is attached to.
Default is None
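        Examples
        --------
        A minimal sketch registering a binary cell-by-cell budget file
        on a hypothetical unit number:

        >>> import flopy
        >>> m = flopy.modflow.Modflow()
        >>> m.add_output_file(53, extension='cbc', binflag=True)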
"""
add_cbc = False
if unit > 0:
add_cbc = True
# determine if the file is in external_units
if abs(unit) in self.external_units:
idx = self.external_units.index(abs(unit))
if fname is None:
fname = os.path.basename(self.external_fnames[idx])
binflag = self.external_binflag[idx]
self.remove_external(unit=abs(unit))
# determine if the unit exists in the output data
if abs(unit) in self.output_units:
add_cbc = False
idx = self.output_units.index(abs(unit))
# determine if binflag has changed
if binflag is not self.output_binflag[idx]:
add_cbc = True
if add_cbc:
self.remove_output(unit=abs(unit))
else:
if package is not None:
self.output_packages[idx].append(package)
if add_cbc:
if fname is None:
fname = f"{self.name}.{extension}"
# check if this file name exists for a different unit number
if fname in self.output_fnames:
idx = self.output_fnames.index(fname)
iut = self.output_units[idx]
if iut != unit:
# include unit number in fname if package has
# not been passed
if package is None:
fname = f"{self.name}.{unit}.{extension}"
# include package name in fname
else:
fname = f"{self.name}.{package}.{extension}"
else:
fname = os.path.basename(fname)
self.add_output(fname, unit, binflag=binflag, package=package)
return
def add_output(self, fname, unit, binflag=False, package=None):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.output_fnames:
if self.verbose:
print(
"BaseModel.add_output() warning: "
f"replacing existing filename {fname}"
)
idx = self.output_fnames.index(fname)
if self.verbose:
self._output_msg(idx, add=False)
self.output_fnames.pop(idx)
self.output_units.pop(idx)
self.output_binflag.pop(idx)
self.output_packages.pop(idx)
self.output_fnames.append(fname)
self.output_units.append(unit)
self.output_binflag.append(binflag)
if package is not None:
self.output_packages.append([package])
else:
self.output_packages.append([])
if self.verbose:
self._output_msg(-1, add=True)
return
def remove_output(self, fname=None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
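        Examples
        --------
        A minimal sketch; unit 53 is a hypothetical output unit added
        beforehand with add_output_file:

        >>> import flopy
        >>> m = flopy.modflow.Modflow()
        >>> m.add_output_file(53, extension='cbc')
        >>> m.remove_output(unit=53)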
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
else:
msg = " either fname or unit must be passed to remove_output()"
raise Exception(msg)
return
def get_output(self, fname=None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
return self.output_units[i]
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
return self.output_fnames[i]
return None
else:
msg = " either fname or unit must be passed to get_output()"
raise Exception(msg)
return
def set_output_attribute(self, fname=None, unit=None, attr=None):
"""
Set a variable in an output file from the model by specifying either
the file name or the unit number and a dictionary with attributes
to change.
Parameters
----------
fname : str
filename of output array
unit : int
            unit number of output array
        attr : dict
            dictionary of attribute names ('binflag', 'fname', or 'unit')
            and the new values to assign
"""
idx = None
        if fname is not None:
            for i, e in enumerate(self.output_fnames):
                if fname in e:
                    idx = i
                    break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
msg = (
" either fname or unit must be passed "
"to set_output_attribute()"
)
raise Exception(msg)
if attr is not None:
if idx is not None:
                for key, value in attr.items():
if key == "binflag":
self.output_binflag[idx] = value
elif key == "fname":
self.output_fnames[idx] = value
elif key == "unit":
self.output_units[idx] = value
return
def get_output_attribute(self, fname=None, unit=None, attr=None):
"""
        Get an attribute for an output file from the model by specifying either
the file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
            unit number of output array
        attr : str
            name of the attribute to return: 'binflag', 'fname', or 'unit'
"""
idx = None
        if fname is not None:
            for i, e in enumerate(self.output_fnames):
                if fname in e:
                    idx = i
                    break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
" either fname or unit must be passed "
"to set_output_attribute()"
)
v = None
if attr is not None:
if idx is not None:
if attr == "binflag":
v = self.output_binflag[idx]
elif attr == "fname":
v = self.output_fnames[idx]
elif attr == "unit":
v = self.output_units[idx]
return v
def add_external(self, fname, unit, binflag=False, output=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.external_fnames:
if self.verbose:
print(
"BaseModel.add_external() warning: "
f"replacing existing filename {fname}"
)
idx = self.external_fnames.index(fname)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
if unit in self.external_units:
if self.verbose:
msg = f"BaseModel.add_external() warning: replacing existing unit {unit}"
print(msg)
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
self.external_output.append(output)
return
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
"""
plist = []
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
plist.append(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
plist.append(i)
else:
msg = " either fname or unit must be passed to remove_external()"
raise Exception(msg)
# remove external file
j = 0
for i in plist:
ipos = i - j
self.external_fnames.pop(ipos)
self.external_units.pop(ipos)
self.external_binflag.pop(ipos)
self.external_output.pop(ipos)
j += 1
return
def add_existing_package(
self, filename, ptype=None, copy_to_model_ws=True
):
"""
Add an existing package to a model instance.
Parameters
----------
filename : str
the name of the file to add as a package
ptype : optional
the model package type (e.g. "lpf", "wel", etc). If None,
then the file extension of the filename arg is used
copy_to_model_ws : bool
flag to copy the package file into the model_ws directory.
Returns
-------
None
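        Examples
        --------
        A minimal sketch; ``existing.wel`` is a hypothetical
        previously-written well-package file:

        >>> import flopy
        >>> m = flopy.modflow.Modflow()
        >>> m.add_existing_package('existing.wel', ptype='WEL')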
"""
if ptype is None:
ptype = filename.split(".")[-1]
ptype = str(ptype).upper()
# for pak in self.packagelist:
# if ptype in pak.name:
# print("BaseModel.add_existing_package() warning: " +\
# "replacing existing package {0}".format(ptype))
class Obj:
pass
fake_package = Obj()
fake_package.write_file = lambda: None
fake_package.name = [ptype]
fake_package.extension = [filename.split(".")[-1]]
fake_package.unit_number = [self.next_ext_unit()]
if copy_to_model_ws:
base_filename = os.path.split(filename)[-1]
fake_package.file_name = [base_filename]
shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
else:
fake_package.file_name = [filename]
fake_package.allowDuplicates = True
self.add_package(fake_package)
def get_name_file_entries(self):
"""
Get a string representation of the name file.
        Returns
        -------
        str
            Name file entries, one line per package file.
"""
lines = []
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s = f"{p.name[i]:14s} {p.unit_number[i]:5d} {p.file_name[i]}"
lines.append(s)
return "\n".join(lines) + "\n"
def has_package(self, name):
"""
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
True if package name exists, otherwise False if not found.
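        Examples
        --------
        A minimal sketch; ``model.nam`` is a hypothetical name file and
        the lookup is case-insensitive:

        >>> import flopy
        >>> m = flopy.modflow.Modflow.load('model.nam')
        >>> m.has_package('dis')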
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for p in self.packagelist:
for pn in p.name:
if pn.upper() == name:
return True
return False
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
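        Examples
        --------
        A minimal sketch; ``model.nam`` is a hypothetical name file:

        >>> import flopy
        >>> m = flopy.modflow.Modflow.load('model.nam')
        >>> dis = m.get_package('DIS')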
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for pp in self.packagelist:
if pp.name[0].upper() == name:
return pp
return None
def set_version(self, version):
self.version = version.lower()
# check that this is a valid model version
if self.version not in list(self.version_types.keys()):
err = (
f"Error: Unsupported model version ({self.version}). "
"Valid model versions are:"
)
for v in list(self.version_types.keys()):
err += f" {v}"
raise Exception(err)
# set namefile heading
self.heading = (
f"# Name file for {self.version_types[self.version]}, "
f"generated by Flopy version {__version__}."
)
# set heading for each package
for p in self.get_package_list():
pak = self.get_package(p)
if hasattr(pak, "heading"):
pak._generate_heading()
return None
def change_model_ws(self, new_pth=None, reset_external=False):
"""
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
the present working directory).
Returns
-------
        None
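        Examples
        --------
        A minimal sketch; ``new_ws`` is a hypothetical directory that
        will be created if it does not exist:

        >>> import flopy
        >>> m = flopy.modflow.Modflow()
        >>> m.change_model_ws('new_ws')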
"""
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
print(f"\ncreating model workspace...\n {new_pth}")
os.makedirs(new_pth)
            except OSError:
                raise OSError(f"{new_pth} is not a valid workspace folder")
# line = '\n{} not valid, workspace-folder '.format(new_pth) + \
# 'was changed to {}\n'.format(os.getcwd())
# print(line)
# new_pth = os.getcwd()
# --reset the model workspace
old_pth = self._model_ws
self._model_ws = new_pth
if self.verbose:
print(f"\nchanging model workspace...\n {new_pth}")
# reset the paths for each package
for pp in self.packagelist:
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
# create the external path (if needed)
if (
hasattr(self, "external_path")
and self.external_path is not None
and not os.path.exists(
os.path.join(self._model_ws, self.external_path)
)
):
pth = os.path.join(self._model_ws, self.external_path)
os.makedirs(pth)
if reset_external:
self._reset_external(pth, old_pth)
elif reset_external:
self._reset_external(self._model_ws, old_pth)
return None
def _reset_external(self, pth, old_pth):
new_ext_fnames = []
for ext_file, output in zip(
self.external_fnames, self.external_output
):
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
# this is a wicked mess
if output:
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
new_ext_file = ext_file
else:
# fpth = os.path.abspath(os.path.join(old_pth, ext_file))
# new_ext_file = os.path.relpath(fpth, os.path.abspath(pth))
fdir = os.path.dirname(ext_file)
if fdir == "":
fpth = os.path.abspath(os.path.join(old_pth, ext_file))
else:
fpth = ext_file
ao = os.path.abspath(os.path.dirname(fpth))
ep = os.path.abspath(pth)
relp = os.path.relpath(ao, ep)
new_ext_file = os.path.join(relp, os.path.basename(ext_file))
new_ext_fnames.append(new_ext_file)
self.external_fnames = new_ext_fnames
@property
def model_ws(self):
return copy.deepcopy(self._model_ws)
def _set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = str(value)
self.namefile = self.__name + "." + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + "." + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
def __setattr__(self, key, value):
if key == "free_format_input":
# if self.bas6 is not None:
# self.bas6.ifrefm = value
super().__setattr__(key, value)
elif key == "name":
self._set_name(value)
elif key == "model_ws":
self.change_model_ws(value)
elif key == "sr" and value.__class__.__name__ == "SpatialReference":
warnings.warn(
"SpatialReference has been deprecated.",
category=DeprecationWarning,
)
if self.dis is not None:
self.dis.sr = value
else:
raise Exception(
"cannot set SpatialReference - ModflowDis not found"
)
elif key == "tr":
assert isinstance(
value, discretization.reference.TemporalReference
)
if self.dis is not None:
self.dis.tr = value
else:
raise Exception(
"cannot set TemporalReference - ModflowDis not found"
)
elif key == "start_datetime":
if self.dis is not None:
self.dis.start_datetime = value
self.tr.start_datetime = value
else:
raise Exception(
"cannot set start_datetime - ModflowDis not found"
)
else:
super().__setattr__(key, value)
def run_model(
self,
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
):
"""
This method will run the model using subprocess.Popen.
Parameters
----------
silent : boolean
            Suppress model output to the screen. (default is False)
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
            by the method. (default is False)
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
return run_model(
self.exe_name,
self.namefile,
model_ws=self.model_ws,
silent=silent,
pause=pause,
report=report,
normal_msg=normal_msg,
)
def load_results(self):
print("load_results not implemented")
return None
def write_input(self, SelPackList=False, check=False):
"""
Write the input.
Parameters
----------
        SelPackList : False or list of str
            List of package names to write. If False (default), all
            packages are written.
        check : bool
            Run model and package checks prior to writing input.
            (default is False)
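        Examples
        --------
        A minimal sketch writing the name file and all package files of
        a newly created (empty) model:

        >>> import flopy
        >>> m = flopy.modflow.Modflow()
        >>> m.write_input()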
"""
if check:
# run check prior to writing input
self.check(f=f"{self.name}.chk", verbose=self.verbose, level=1)
# reset the model to free_format if parameter substitution was
# performed on a model load
if self.parameter_load and not self.free_format_input:
if self.verbose:
print(
"\nResetting free_format_input to True to "
"preserve the precision of the parameter data."
)
self.free_format_input = True
if self.verbose:
print("\nWriting packages:")
if SelPackList == False:
for p in self.packagelist:
if self.verbose:
print(" Package: ", p.name[0])
# prevent individual package checks from running after
# model-level package check above
# otherwise checks are run twice
# or the model level check procedure would have to be split up
# or each package would need a check argument,
# or default for package level check would have to be False
try:
p.write_file(check=False)
except TypeError:
p.write_file()
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(" Package: ", p.name[0])
try:
p.write_file(check=False)
except TypeError:
p.write_file()
break
if self.verbose:
print(" ")
# write name file
self.write_name_file()
# os.chdir(org_dir)
return
def write_name_file(self):
"""
        Every model needs its own write_name_file method.
        """
        raise Exception(
            "IMPLEMENTATION ERROR: write_name_file must be overloaded"
)
def set_model_units(self):
"""
Every model needs its own set_model_units method
"""
raise Exception(
"IMPLEMENTATION ERROR: set_model_units must be overloaded"
)
@property
def name(self):
"""
Get model name
Returns
-------
name : str
name of model
"""
return copy.deepcopy(self.__name)
def add_pop_key_list(self, key):
"""
Add a external file unit number to a list that will be used to remove
model output (typically binary) files from ext_unit_dict.
Parameters
----------
key : int
file unit number
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
chk = utils.check(self, f=f, verbose=verbose, level=level)
# check for unit number conflicts
package_units = {}
duplicate_units = {}
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] != 0:
if p.unit_number[i] in package_units.values():
duplicate_units[p.name[i]] = p.unit_number[i]
otherpackage = [
k
for k, v in package_units.items()
if v == p.unit_number[i]
][0]
duplicate_units[otherpackage] = p.unit_number[i]
if len(duplicate_units) > 0:
for k, v in duplicate_units.items():
chk._add_to_summary(
"Error", package=k, value=v, desc="unit number conflict"
)
else:
chk.passed.append("Unit number conflicts")
return self._check(chk, level)
def plot(self, SelPackList=None, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
model input data
Parameters
----------
SelPackList : bool or list
            List of packages to plot. If SelPackList=None, all packages
are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
                MODFLOW zero-based layer number to return. If None, then
                all layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return.
(default is zero)
key : str
MfList dictionary key. (default is None)
Returns
        -------
axes : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis are returned.
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_model_helper(
self, SelPackList=SelPackList, **kwargs
)
return axes
def to_shapefile(self, filename, package_names=None, **kwargs):
"""
Wrapper function for writing a shapefile for the model grid. If
package_names is not None, then search through the requested packages
looking for arrays that can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
        >>> m.to_shapefile('model.shp', package_names=['dis', 'lpf'])
"""
warnings.warn("to_shapefile() is deprecated. use .export()")
self.export(filename, package_names=package_names)
return
def run_model(
exe_name,
namefile,
model_ws="./",
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
use_async=False,
cargs=None,
):
"""
    This function will run the model using subprocess.Popen. If
    use_async is True, it communicates with the model's stdout
    asynchronously and reports progress to the screen with timestamps.
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
Namefile of model to run. The namefile must be the
filename of the namefile without the path. Namefile can be None
to allow programs that do not require a control file (name file)
to be passed as a command line argument.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
silent : boolean
        Suppress model output to the screen. (default is False)
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
        by the method. (default is False)
normal_msg : str or list
Normal termination message used to determine if the
run terminated normally. More than one message can be provided using
a list. (Default is 'normal termination')
use_async : boolean
        Asynchronously read model stdout and report with timestamps.
        Good for models that take a long time to run; not recommended
        for models that run very quickly.
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
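    Examples
    --------
    A minimal sketch; the executable name and name file are hypothetical
    and must exist for the run to succeed:

    >>> from flopy.mbase import run_model
    >>> success, buff = run_model('mf2005', 'model.nam', model_ws='.')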
"""
success = False
buff = []
# convert normal_msg to a list of lower case str for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg]
for idx, s in enumerate(normal_msg):
normal_msg[idx] = s.lower()
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
import platform
if platform.system() in "Windows":
if not exe_name.lower().endswith(".exe"):
exe = which(exe_name + ".exe")
elif exe_name.lower().endswith(".exe"):
exe = which(exe_name[:-4])
if exe is None:
raise Exception(
f"The program {exe_name} does not exist or is not executable."
)
else:
if not silent:
print(
f"FloPy is using the following executable to run the model: {exe}"
)
if namefile is not None:
if not os.path.isfile(os.path.join(model_ws, namefile)):
raise Exception(
f"The namefile for this model does not exists: {namefile}"
)
# simple little function for the thread to target
def q_output(output, q):
for line in iter(output.readline, b""):
q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe_name]
if namefile is not None:
argv.append(namefile)
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
if not use_async:
while True:
line = proc.stdout.readline().decode("utf-8")
if line == "" and proc.poll() is not None:
break
if line:
for msg in normal_msg:
if msg in line.lower():
success = True
break
line = line.rstrip("\r\n")
if not silent:
print(line)
if report:
buff.append(line)
else:
break
return success, buff
# some tricks for the async stdout reading
q = Queue.Queue()
thread = threading.Thread(target=q_output, args=(proc.stdout, q))
thread.daemon = True
thread.start()
failed_words = ["fail", "error"]
last = datetime.now()
lastsec = 0.0
while True:
try:
line = q.get_nowait()
except Queue.Empty:
pass
else:
if line == "":
break
line = line.decode().lower().strip()
if line != "":
now = datetime.now()
dt = now - last
tsecs = dt.total_seconds() - lastsec
line = f"(elapsed:{tsecs})-->{line}"
lastsec = tsecs + lastsec
buff.append(line)
if not silent:
print(line)
for fword in failed_words:
if fword in line:
success = False
break
if proc.poll() is not None:
break
proc.wait()
thread.join(timeout=1)
    buff.extend(line.decode("utf-8") for line in proc.stdout.readlines())
proc.stdout.close()
for line in buff:
for msg in normal_msg:
if msg in line.lower():
print("success")
success = True
break
if pause:
input("Press Enter to continue...")
return success, buff
|
_check
|
Check model data for common errors.
Parameters
----------
chk : flopy.utils.check instance
    Check instance used to collect model-level and package-level
    check results.
level : int
    Check method analysis level. If level=0, summary checks are
    performed. If level=1, full checks are performed.
Returns
-------
chk : flopy.utils.check instance
    The check instance, with package results appended and summarized.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
|
"""
mbase module
This module contains the base model class from which
all of the other models inherit.
"""
import abc
import os
import shutil
import threading
import warnings
import queue as Queue
from datetime import datetime
from shutil import which
from subprocess import Popen, PIPE, STDOUT
import copy
import numpy as np
from flopy import utils, discretization
from .version import __version__
from .discretization.grid import Grid
## Global variables
# Multiplier for individual array elements in integer and real arrays read by
# MODFLOW's U2DREL, U1DREL and U2DINT.
iconst = 1
# Printout flag. If >= 0 then array values read are printed in listing file.
iprn = -1
# external exceptions for users
class PackageLoadException(Exception):
"""
FloPy package load exception.
"""
def __init__(self, error, location=""):
"""Initialize exception."""
self.message = error
super().__init__(f"{error} ({location})")
class FileDataEntry:
def __init__(self, fname, unit, binflag=False, output=False, package=None):
self.fname = fname
self.unit = unit
self.binflag = binflag
self.output = output
self.package = package
class FileData:
def __init__(self):
self.file_data = []
return
def add_file(self, fname, unit, binflag=False, output=False, package=None):
ipop = []
for idx, file_data in enumerate(self.file_data):
if file_data.fname == fname or file_data.unit == unit:
                ipop.append(idx)
        # drop any existing entries that share the file name or unit number
        for idx in ipop[::-1]:
            self.file_data.pop(idx)
        self.file_data.append(
FileDataEntry(
fname, unit, binflag=binflag, output=output, package=package
)
)
return
class ModelInterface:
def __init__(self):
self._mg_resync = True
self._modelgrid = None
def update_modelgrid(self):
if self._modelgrid is not None:
self._modelgrid = Grid(
proj4=self._modelgrid.proj4,
xoff=self._modelgrid.xoffset,
yoff=self._modelgrid.yoffset,
angrot=self._modelgrid.angrot,
)
self._mg_resync = True
@property
@abc.abstractmethod
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
@abc.abstractmethod
def packagelist(self):
raise NotImplementedError(
"must define packagelist in child class to use this base class"
)
@property
@abc.abstractmethod
def namefile(self):
raise NotImplementedError(
"must define namefile in child class to use this base class"
)
@property
@abc.abstractmethod
def model_ws(self):
raise NotImplementedError(
"must define model_ws in child class to use this base class"
)
@property
@abc.abstractmethod
def exename(self):
raise NotImplementedError(
"must define exename in child class to use this base class"
)
@property
@abc.abstractmethod
def version(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@property
@abc.abstractmethod
def solver_tols(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@abc.abstractmethod
def export(self, f, **kwargs):
raise NotImplementedError(
"must define export in child class to use this base class"
)
@property
@abc.abstractmethod
def laytyp(self):
raise NotImplementedError(
"must define laytyp in child class to use this base class"
)
@property
@abc.abstractmethod
def hdry(self):
raise NotImplementedError(
"must define hdry in child class to use this base class"
)
@property
@abc.abstractmethod
def hnoflo(self):
raise NotImplementedError(
"must define hnoflo in child class to use this base class"
)
@property
@abc.abstractmethod
def laycbd(self):
raise NotImplementedError(
"must define laycbd in child class to use this base class"
)
@property
@abc.abstractmethod
def verbose(self):
raise NotImplementedError(
"must define verbose in child class to use this base class"
)
@abc.abstractmethod
def check(self, f=None, verbose=True, level=1):
raise NotImplementedError(
"must define check in child class to use this base class"
)
def get_package_list(self, ftype=None):
"""
Get a list of all the package names.
Parameters
----------
ftype : str
Type of package, 'RIV', 'LPF', etc.
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
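        Examples
        --------
        A minimal sketch; ``model.nam`` is a hypothetical name file:

        >>> import flopy
        >>> m = flopy.modflow.Modflow.load('model.nam')
        >>> m.get_package_list()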
"""
val = []
for pp in self.packagelist:
if ftype is None:
val.append(pp.name[0].upper())
elif pp.package_type.lower() == ftype:
val.append(pp.name[0].upper())
return val
# MASKED: _check function (lines 206-283)
class BaseModel(ModelInterface):
"""
MODFLOW-based models base class.
Parameters
----------
modelname : str, default "modflowtest"
Name of the model, which is also used for model file names.
namefile_ext : str, default "nam"
Name file extension, without "."
exe_name : str, default "mf2k.exe"
Name of the modflow executable.
model_ws : str, optional
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
structured : bool, default True
Specify if model grid is structured (default) or unstructured.
verbose : bool, default False
Print additional information to the screen.
**kwargs : dict, optional
Used to define: ``xll``/``yll`` for the x- and y-coordinates of
the lower-left corner of the grid, ``xul``/``yul`` for the
x- and y-coordinates of the upper-left corner of the grid
(deprecated), ``rotation`` for the grid rotation (default 0.0),
``proj4_str`` for a PROJ string, and ``start_datetime`` for
model start date (default "1-1-1970").
"""
def __init__(
self,
modelname="modflowtest",
namefile_ext="nam",
exe_name="mf2k.exe",
model_ws=None,
structured=True,
verbose=False,
**kwargs,
):
"""Initialize BaseModel."""
super().__init__()
self.__name = modelname
self.namefile_ext = namefile_ext or ""
self._namefile = self.__name + "." + self.namefile_ext
self._packagelist = []
self.heading = ""
self.exe_name = exe_name
self._verbose = verbose
self.external_path = None
self.external_extension = "ref"
if model_ws is None:
model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
            except OSError:
                print(
                    f"\n{model_ws} is not a valid workspace; "
                    f"it was changed to {os.getcwd()}\n"
)
model_ws = os.getcwd()
self._model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ""
# check for reference info in kwargs
# we are just carrying these until a dis package is added
xll = kwargs.pop("xll", None)
yll = kwargs.pop("yll", None)
self._xul = kwargs.pop("xul", None)
self._yul = kwargs.pop("yul", None)
self._rotation = kwargs.pop("rotation", 0.0)
self._proj4_str = kwargs.pop("proj4_str", None)
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
# build model discretization objects
self._modelgrid = Grid(
proj4=self._proj4_str,
xoff=xll,
yoff=yll,
angrot=self._rotation,
)
self._modeltime = None
# Model file information
self.__onunit__ = 10
# external option stuff
self.array_free_format = True
self.free_format_input = True
self.parameter_load = False
self.array_format = None
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external_output = []
self.package_units = []
self._next_ext_unit = None
# output files
self.output_fnames = []
self.output_units = []
self.output_binflag = []
self.output_packages = []
return
@property
def modeltime(self):
raise NotImplementedError(
"must define modeltime in child class to use this base class"
)
@property
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
def packagelist(self):
return self._packagelist
@packagelist.setter
def packagelist(self, packagelist):
self._packagelist = packagelist
@property
def namefile(self):
return self._namefile
@namefile.setter
def namefile(self, namefile):
self._namefile = namefile
@property
def model_ws(self):
return self._model_ws
@model_ws.setter
def model_ws(self, model_ws):
self._model_ws = model_ws
@property
def exename(self):
return self._exename
@exename.setter
def exename(self, exename):
self._exename = exename
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, verbose):
self._verbose = verbose
@property
def laytyp(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").laytyp.array
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").laycon.array
if self.get_package("UPW") is not None:
return self.get_package("UPW").laytyp.array
return None
@property
def hdry(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").hdry
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").hdry
if self.get_package("UPW") is not None:
return self.get_package("UPW").hdry
return None
@property
def hnoflo(self):
try:
bas6 = self.get_package("BAS6")
return bas6.hnoflo
except AttributeError:
return None
@property
def laycbd(self):
try:
dis = self.get_package("DIS")
return dis.laycbd.array
except AttributeError:
return None
# we don't need these - no need for controlled access to array_free_format
# def set_free_format(self, value=True):
# """
# Set the free format flag for the model instance
#
# Parameters
# ----------
# value : bool
# Boolean value to set free format flag for model. (default is True)
#
# Returns
# -------
#
# """
# if not isinstance(value, bool):
# print('Error: set_free_format passed value must be a boolean')
# return False
# self.array_free_format = value
#
# def get_free_format(self):
# """
# Return the free format flag for the model
#
# Returns
# -------
# out : bool
# Free format flag for the model
#
# """
# return self.array_free_format
def next_unit(self, i=None):
if i is not None:
self.__onunit__ = i - 1
else:
self.__onunit__ += 1
return self.__onunit__
def next_ext_unit(self):
"""
Function to encapsulate next_ext_unit attribute
"""
next_unit = self._next_ext_unit + 1
self._next_ext_unit += 1
return next_unit
def export(self, f, **kwargs):
"""
Method to export a model to netcdf or shapefile based on the
extension of the file name (.shp for shapefile, .nc for netcdf)
Parameters
----------
f : str
filename
kwargs : keyword arguments
modelgrid : flopy.discretization.Grid instance
user supplied modelgrid which can be used for exporting
in lieu of the modelgrid associated with the model object
Returns
-------
None or Netcdf object
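        Examples
        --------
        A minimal sketch; the export format is inferred from the file
        extension, and ``model.nam`` is a hypothetical name file:

        >>> import flopy
        >>> m = flopy.modflow.Modflow.load('model.nam')
        >>> m.export('modelgrid.shp')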
"""
from .export import utils
return utils.model_export(f, self, **kwargs)
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
"""
for idx, u in enumerate(p.unit_number):
if u != 0:
if u in self.package_units or u in self.external_units:
try:
pn = p.name[idx]
except:
pn = p.name
if self.verbose:
print(
f"\nWARNING:\n unit {u} of package {pn} already in use."
)
self.package_units.append(u)
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif isinstance(p, type(pp)):
if self.verbose:
print(
"\nWARNING:\n Two packages of the same type, "
f"Replacing existing '{p.name[0]}' package."
)
self.packagelist[i] = p
return
if self.verbose:
print("adding Package: ", p.name[0])
self.packagelist.append(p)
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
"""
for i, pp in enumerate(self.packagelist):
if pname.upper() in pp.name:
if self.verbose:
print("removing Package: ", pp.name)
# Remove the package object from the model's packagelist
p = self.packagelist.pop(i)
# Remove the package unit number from the list of package
# units stored with the model
for iu in p.unit_number:
if iu in self.package_units:
self.package_units.remove(iu)
return
raise StopIteration(
"Package name " + pname + " not found in Package list"
)
def __getattr__(self, item):
"""
__getattr__ - syntactic sugar
Parameters
----------
item : str
3 character package name (case insensitive) or "sr" to access
the SpatialReference instance of the ModflowDis object
Returns
-------
sr : SpatialReference instance
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
Note
----
if self.dis is not None, then the spatial reference instance is updated
using self.dis.delr, self.dis.delc, and self.dis.lenuni before being
returned
"""
if item == "output_packages" or not hasattr(self, "output_packages"):
raise AttributeError(item)
if item == "tr":
if self.dis is not None:
return self.dis.tr
else:
return None
if item == "nper":
if self.dis is not None:
return self.dis.nper
else:
return 0
if item == "start_datetime":
if self.dis is not None:
return self.dis.start_datetime
else:
return None
# return self.get_package(item)
# to avoid infinite recursion
if item == "_packagelist" or item == "packagelist":
raise AttributeError(item)
pckg = self.get_package(item)
if pckg is not None or item in self.mfnam_packages:
return pckg
if item == "modelgrid":
return
raise AttributeError(item)
def get_ext_dict_attr(
self, ext_unit_dict=None, unit=None, filetype=None, pop_key=True
):
iu = None
fname = None
if ext_unit_dict is not None:
for key, value in ext_unit_dict.items():
if key == unit:
iu = key
fname = os.path.basename(value.filename)
break
elif value.filetype == filetype:
iu = key
fname = os.path.basename(value.filename)
if pop_key:
self.add_pop_key_list(iu)
break
return iu, fname
def _output_msg(self, i, add=True):
if add:
txt1 = "Adding"
txt2 = "to"
else:
txt1 = "Removing"
txt2 = "from"
print(
f"{txt1} {self.output_fnames[i]} (unit={self.output_units[i]}) "
f"{txt2} the output list."
)
def add_output_file(
self, unit, fname=None, extension="cbc", binflag=True, package=None
):
"""
Add an ascii or binary output file for a package
Parameters
----------
unit : int
unit number of external array
fname : str
filename of external array. (default is None)
extension : str
extension to use for the cell-by-cell file. Only used if fname
is None. (default is cbc)
binflag : bool
boolean flag indicating if the output file is a binary file.
Default is True
package : str
string that defines the package the output file is attached to.
Default is None
"""
add_cbc = False
if unit > 0:
add_cbc = True
# determine if the file is in external_units
if abs(unit) in self.external_units:
idx = self.external_units.index(abs(unit))
if fname is None:
fname = os.path.basename(self.external_fnames[idx])
binflag = self.external_binflag[idx]
self.remove_external(unit=abs(unit))
# determine if the unit exists in the output data
if abs(unit) in self.output_units:
add_cbc = False
idx = self.output_units.index(abs(unit))
# determine if binflag has changed
if binflag is not self.output_binflag[idx]:
add_cbc = True
if add_cbc:
self.remove_output(unit=abs(unit))
else:
if package is not None:
self.output_packages[idx].append(package)
if add_cbc:
if fname is None:
fname = f"{self.name}.{extension}"
# check if this file name exists for a different unit number
if fname in self.output_fnames:
idx = self.output_fnames.index(fname)
iut = self.output_units[idx]
if iut != unit:
# include unit number in fname if package has
# not been passed
if package is None:
fname = f"{self.name}.{unit}.{extension}"
# include package name in fname
else:
fname = f"{self.name}.{package}.{extension}"
else:
fname = os.path.basename(fname)
self.add_output(fname, unit, binflag=binflag, package=package)
return
def add_output(self, fname, unit, binflag=False, package=None):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.output_fnames:
if self.verbose:
print(
"BaseModel.add_output() warning: "
f"replacing existing filename {fname}"
)
idx = self.output_fnames.index(fname)
if self.verbose:
self._output_msg(idx, add=False)
self.output_fnames.pop(idx)
self.output_units.pop(idx)
self.output_binflag.pop(idx)
self.output_packages.pop(idx)
self.output_fnames.append(fname)
self.output_units.append(unit)
self.output_binflag.append(binflag)
if package is not None:
self.output_packages.append([package])
else:
self.output_packages.append([])
if self.verbose:
self._output_msg(-1, add=True)
return
def remove_output(self, fname=None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
else:
msg = " either fname or unit must be passed to remove_output()"
raise Exception(msg)
return
def get_output(self, fname=None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
return self.output_units[i]
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
return self.output_fnames[i]
return None
else:
msg = " either fname or unit must be passed to get_output()"
raise Exception(msg)
return
def set_output_attribute(self, fname=None, unit=None, attr=None):
"""
Set a variable in an output file from the model by specifying either
the file name or the unit number and a dictionary with attributes
to change.
Parameters
----------
fname : str
filename of output array
unit : int
            unit number of output array
        attr : dict
            dictionary of attribute names ('binflag', 'fname', or 'unit')
            and the new values to assign
"""
idx = None
        if fname is not None:
            for i, e in enumerate(self.output_fnames):
                if fname in e:
                    idx = i
                    break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
msg = (
" either fname or unit must be passed "
"to set_output_attribute()"
)
raise Exception(msg)
if attr is not None:
if idx is not None:
                for key, value in attr.items():
if key == "binflag":
self.output_binflag[idx] = value
elif key == "fname":
self.output_fnames[idx] = value
elif key == "unit":
self.output_units[idx] = value
return
def get_output_attribute(self, fname=None, unit=None, attr=None):
"""
        Get an attribute for an output file from the model by specifying either
the file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
            unit number of output array
        attr : str
            name of the attribute to return: 'binflag', 'fname', or 'unit'
"""
idx = None
        if fname is not None:
            for i, e in enumerate(self.output_fnames):
                if fname in e:
                    idx = i
                    break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
" either fname or unit must be passed "
"to set_output_attribute()"
)
v = None
if attr is not None:
if idx is not None:
if attr == "binflag":
v = self.output_binflag[idx]
elif attr == "fname":
v = self.output_fnames[idx]
elif attr == "unit":
v = self.output_units[idx]
return v
def add_external(self, fname, unit, binflag=False, output=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.external_fnames:
if self.verbose:
print(
"BaseModel.add_external() warning: "
f"replacing existing filename {fname}"
)
idx = self.external_fnames.index(fname)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
if unit in self.external_units:
if self.verbose:
msg = f"BaseModel.add_external() warning: replacing existing unit {unit}"
print(msg)
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
self.external_output.append(output)
return
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
"""
plist = []
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
plist.append(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
plist.append(i)
else:
msg = " either fname or unit must be passed to remove_external()"
raise Exception(msg)
# remove external file
j = 0
for i in plist:
ipos = i - j
self.external_fnames.pop(ipos)
self.external_units.pop(ipos)
self.external_binflag.pop(ipos)
self.external_output.pop(ipos)
j += 1
return
def add_existing_package(
self, filename, ptype=None, copy_to_model_ws=True
):
"""
Add an existing package to a model instance.
Parameters
----------
filename : str
the name of the file to add as a package
ptype : optional
the model package type (e.g. "lpf", "wel", etc). If None,
then the file extension of the filename arg is used
copy_to_model_ws : bool
flag to copy the package file into the model_ws directory.
Returns
-------
None
"""
if ptype is None:
ptype = filename.split(".")[-1]
ptype = str(ptype).upper()
# for pak in self.packagelist:
# if ptype in pak.name:
# print("BaseModel.add_existing_package() warning: " +\
# "replacing existing package {0}".format(ptype))
class Obj:
pass
fake_package = Obj()
fake_package.write_file = lambda: None
fake_package.name = [ptype]
fake_package.extension = [filename.split(".")[-1]]
fake_package.unit_number = [self.next_ext_unit()]
if copy_to_model_ws:
base_filename = os.path.split(filename)[-1]
fake_package.file_name = [base_filename]
shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
else:
fake_package.file_name = [filename]
fake_package.allowDuplicates = True
self.add_package(fake_package)
def get_name_file_entries(self):
"""
Get a string representation of the name file.
        Returns
        -------
        str
            Name file entries, one line per package file.
"""
lines = []
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s = f"{p.name[i]:14s} {p.unit_number[i]:5d} {p.file_name[i]}"
lines.append(s)
return "\n".join(lines) + "\n"
def has_package(self, name):
"""
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
True if package name exists, otherwise False if not found.
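Examples
--------
Illustrative sketch; assumes the loaded model includes a DIS package:
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.has_package('DIS')
True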
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for p in self.packagelist:
for pn in p.name:
if pn.upper() == name:
return True
return False
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
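Examples
--------
Hypothetical sketch; assumes the loaded model contains an LPF package:
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> lpf = m.get_package('LPF')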
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for pp in self.packagelist:
if pp.name[0].upper() == name:
return pp
return None
def set_version(self, version):
self.version = version.lower()
# check that this is a valid model version
if self.version not in list(self.version_types.keys()):
err = (
f"Error: Unsupported model version ({self.version}). "
"Valid model versions are:"
)
for v in list(self.version_types.keys()):
err += f" {v}"
raise Exception(err)
# set namefile heading
self.heading = (
f"# Name file for {self.version_types[self.version]}, "
f"generated by Flopy version {__version__}."
)
# set heading for each package
for p in self.get_package_list():
pak = self.get_package(p)
if hasattr(pak, "heading"):
pak._generate_heading()
return None
def change_model_ws(self, new_pth=None, reset_external=False):
"""
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
the present working directory).
reset_external : bool
flag to reset external file paths so they are relative to the new
model workspace. (default is False)
Returns
-------
None
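Examples
--------
A hypothetical sketch; the workspace path below is illustrative:
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.change_model_ws('new_workspace', reset_external=True)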
"""
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
print(f"\ncreating model workspace...\n {new_pth}")
os.makedirs(new_pth)
except OSError:
raise OSError(f"{new_pth} is not a valid workspace folder")
# line = '\n{} not valid, workspace-folder '.format(new_pth) + \
# 'was changed to {}\n'.format(os.getcwd())
# print(line)
# new_pth = os.getcwd()
# --reset the model workspace
old_pth = self._model_ws
self._model_ws = new_pth
if self.verbose:
print(f"\nchanging model workspace...\n {new_pth}")
# reset the paths for each package
for pp in self.packagelist:
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
# create the external path (if needed)
if (
hasattr(self, "external_path")
and self.external_path is not None
and not os.path.exists(
os.path.join(self._model_ws, self.external_path)
)
):
pth = os.path.join(self._model_ws, self.external_path)
os.makedirs(pth)
if reset_external:
self._reset_external(pth, old_pth)
elif reset_external:
self._reset_external(self._model_ws, old_pth)
return None
def _reset_external(self, pth, old_pth):
new_ext_fnames = []
for ext_file, output in zip(
self.external_fnames, self.external_output
):
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
# this is a wicked mess
if output:
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
new_ext_file = ext_file
else:
# fpth = os.path.abspath(os.path.join(old_pth, ext_file))
# new_ext_file = os.path.relpath(fpth, os.path.abspath(pth))
fdir = os.path.dirname(ext_file)
if fdir == "":
fpth = os.path.abspath(os.path.join(old_pth, ext_file))
else:
fpth = ext_file
ao = os.path.abspath(os.path.dirname(fpth))
ep = os.path.abspath(pth)
relp = os.path.relpath(ao, ep)
new_ext_file = os.path.join(relp, os.path.basename(ext_file))
new_ext_fnames.append(new_ext_file)
self.external_fnames = new_ext_fnames
@property
def model_ws(self):
return copy.deepcopy(self._model_ws)
def _set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = str(value)
self.namefile = self.__name + "." + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + "." + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
def __setattr__(self, key, value):
if key == "free_format_input":
# if self.bas6 is not None:
# self.bas6.ifrefm = value
super().__setattr__(key, value)
elif key == "name":
self._set_name(value)
elif key == "model_ws":
self.change_model_ws(value)
elif key == "sr" and value.__class__.__name__ == "SpatialReference":
warnings.warn(
"SpatialReference has been deprecated.",
category=DeprecationWarning,
)
if self.dis is not None:
self.dis.sr = value
else:
raise Exception(
"cannot set SpatialReference - ModflowDis not found"
)
elif key == "tr":
assert isinstance(
value, discretization.reference.TemporalReference
)
if self.dis is not None:
self.dis.tr = value
else:
raise Exception(
"cannot set TemporalReference - ModflowDis not found"
)
elif key == "start_datetime":
if self.dis is not None:
self.dis.start_datetime = value
self.tr.start_datetime = value
else:
raise Exception(
"cannot set start_datetime - ModflowDis not found"
)
else:
super().__setattr__(key, value)
def run_model(
self,
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
):
"""
This method will run the model using subprocess.Popen.
Parameters
----------
silent : boolean
Suppress echo of run information to the screen (default is False).
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) that is returned
by the method. (default is False).
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
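Examples
--------
A minimal sketch, assuming the model executable is available on the
system path:
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> success, buff = m.run_model(silent=True, report=True)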
"""
return run_model(
self.exe_name,
self.namefile,
model_ws=self.model_ws,
silent=silent,
pause=pause,
report=report,
normal_msg=normal_msg,
)
def load_results(self):
print("load_results not implemented")
return None
def write_input(self, SelPackList=False, check=False):
"""
Write the input.
Parameters
----------
SelPackList : False or list of package names
List of package names to write. If False (the default), all
packages are written.
check : bool
flag to run the model check prior to writing input. (default is
False)
"""
if check:
# run check prior to writing input
self.check(f=f"{self.name}.chk", verbose=self.verbose, level=1)
# reset the model to free_format if parameter substitution was
# performed on a model load
if self.parameter_load and not self.free_format_input:
if self.verbose:
print(
"\nResetting free_format_input to True to "
"preserve the precision of the parameter data."
)
self.free_format_input = True
if self.verbose:
print("\nWriting packages:")
if SelPackList is False:
for p in self.packagelist:
if self.verbose:
print(" Package: ", p.name[0])
# prevent individual package checks from running after
# model-level package check above
# otherwise checks are run twice
# or the model level check procedure would have to be split up
# or each package would need a check argument,
# or default for package level check would have to be False
try:
p.write_file(check=False)
except TypeError:
p.write_file()
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(" Package: ", p.name[0])
try:
p.write_file(check=False)
except TypeError:
p.write_file()
break
if self.verbose:
print(" ")
# write name file
self.write_name_file()
# os.chdir(org_dir)
return
def write_name_file(self):
"""
Every Package needs its own writenamefile function
"""
raise Exception(
"IMPLEMENTATION ERROR: writenamefile must be overloaded"
)
def set_model_units(self):
"""
Every model needs its own set_model_units method
"""
raise Exception(
"IMPLEMENTATION ERROR: set_model_units must be overloaded"
)
@property
def name(self):
"""
Get model name
Returns
-------
name : str
name of model
"""
return copy.deepcopy(self.__name)
def add_pop_key_list(self, key):
"""
Add an external file unit number to a list that will be used to remove
model output (typically binary) files from ext_unit_dict.
Parameters
----------
key : int
file unit number
Returns
-------
Examples
--------
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
chk : check object
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
chk = utils.check(self, f=f, verbose=verbose, level=level)
# check for unit number conflicts
package_units = {}
duplicate_units = {}
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] != 0:
if p.unit_number[i] in package_units.values():
duplicate_units[p.name[i]] = p.unit_number[i]
otherpackage = [
k
for k, v in package_units.items()
if v == p.unit_number[i]
][0]
duplicate_units[otherpackage] = p.unit_number[i]
if len(duplicate_units) > 0:
for k, v in duplicate_units.items():
chk._add_to_summary(
"Error", package=k, value=v, desc="unit number conflict"
)
else:
chk.passed.append("Unit number conflicts")
return self._check(chk, level)
def plot(self, SelPackList=None, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
model input data
Parameters
----------
SelPackList : bool or list
List of packages to plot. If SelPackList=None all packages
are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
MODFLOW zero-based layer number to return. If None, then all
layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return.
(default is zero)
key : str
MfList dictionary key. (default is None)
Returns
-------
axes : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis are returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_model_helper(
self, SelPackList=SelPackList, **kwargs
)
return axes
def to_shapefile(self, filename, package_names=None, **kwargs):
"""
Wrapper function for writing a shapefile for the model grid. If
package_names is not None, then search through the requested packages
looking for arrays that can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> m.to_shapefile('model.shp', package_names=['dis', 'lpf'])
"""
warnings.warn("to_shapefile() is deprecated. use .export()")
self.export(filename, package_names=package_names)
return
def run_model(
exe_name,
namefile,
model_ws="./",
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
use_async=False,
cargs=None,
):
"""
This function will run the model using subprocess.Popen. It
communicates with the model's stdout asynchronously and reports
progress to the screen with timestamps
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
Name file of the model to run. This must be the file name only,
without any path. It can be None for programs that do not require
a control (name) file to be passed as a command line argument.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
silent : boolean
Suppress echo of run information to the screen (default is False).
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) that is returned
by the function. (default is False).
normal_msg : str or list
Normal termination message used to determine if the
run terminated normally. More than one message can be provided using
a list. (Default is 'normal termination')
use_async : boolean
Asynchronously read model stdout and report with timestamps. Useful
for models that take a long time to run; not recommended for models
that run very quickly.
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
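Examples
--------
A minimal sketch; the executable and name file names are hypothetical:
>>> from flopy.mbase import run_model
>>> success, buff = run_model('mf2005', 'model.nam',
...                           model_ws='model_dir', report=True)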
"""
success = False
buff = []
# convert normal_msg to a list of lower case str for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg]
for idx, s in enumerate(normal_msg):
normal_msg[idx] = s.lower()
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
import platform
if platform.system() in "Windows":
if not exe_name.lower().endswith(".exe"):
exe = which(exe_name + ".exe")
elif exe_name.lower().endswith(".exe"):
exe = which(exe_name[:-4])
if exe is None:
raise Exception(
f"The program {exe_name} does not exist or is not executable."
)
else:
if not silent:
print(
f"FloPy is using the following executable to run the model: {exe}"
)
if namefile is not None:
if not os.path.isfile(os.path.join(model_ws, namefile)):
raise Exception(
f"The namefile for this model does not exists: {namefile}"
)
# simple little function for the thread to target
def q_output(output, q):
for line in iter(output.readline, b""):
q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe_name]
if namefile is not None:
argv.append(namefile)
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
if not use_async:
while True:
line = proc.stdout.readline().decode("utf-8")
if line == "" and proc.poll() is not None:
break
if line:
for msg in normal_msg:
if msg in line.lower():
success = True
break
line = line.rstrip("\r\n")
if not silent:
print(line)
if report:
buff.append(line)
else:
break
return success, buff
# some tricks for the async stdout reading
q = Queue.Queue()
thread = threading.Thread(target=q_output, args=(proc.stdout, q))
thread.daemon = True
thread.start()
failed_words = ["fail", "error"]
last = datetime.now()
lastsec = 0.0
while True:
try:
line = q.get_nowait()
except Queue.Empty:
pass
else:
if line == "":
break
line = line.decode().lower().strip()
if line != "":
now = datetime.now()
dt = now - last
tsecs = dt.total_seconds() - lastsec
line = f"(elapsed:{tsecs})-->{line}"
lastsec = tsecs + lastsec
buff.append(line)
if not silent:
print(line)
for fword in failed_words:
if fword in line:
success = False
break
if proc.poll() is not None:
break
proc.wait()
thread.join(timeout=1)
buff.extend(proc.stdout.readlines())
proc.stdout.close()
for line in buff:
for msg in normal_msg:
if msg in line.lower():
print("success")
success = True
break
if pause:
input("Press Enter to continue...")
return success, buff
|
def _check(self, chk, level=1):
"""
Check model data for common errors.
Parameters
----------
chk : check object
Model-level check object to which package-level results are
appended.
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
chk : check object
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
results = {}
for p in self.packagelist:
if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
results[p.name[0]] = p.check(
f=None,
verbose=False,
level=level - 1,
checktype=chk.__class__,
)
# model level checks
# solver check
if self.version in chk.solver_packages.keys():
solvers = set(chk.solver_packages[self.version]).intersection(
set(self.get_package_list())
)
if not solvers:
chk._add_to_summary(
"Error", desc="\r No solver package", package="model"
)
elif len(list(solvers)) > 1:
for s in solvers:
chk._add_to_summary(
"Error",
desc="\r Multiple solver packages",
package=s,
)
else:
chk.passed.append("Compatible solver package")
# add package check results to model level check summary
for r in results.values():
if (
r is not None and r.summary_array is not None
): # currently SFR doesn't have one
chk.summary_array = np.append(
chk.summary_array, r.summary_array
).view(np.recarray)
chk.passed += [
f"{r.package.name[0]} package: {psd}" for psd in r.passed
]
chk.summarize()
return chk
| 206
| 283
|
"""
mbase module
This module contains the base model class from which
all of the other models inherit.
"""
import abc
import os
import shutil
import threading
import warnings
import queue as Queue
from datetime import datetime
from shutil import which
from subprocess import Popen, PIPE, STDOUT
import copy
import numpy as np
from flopy import utils, discretization
from .version import __version__
from .discretization.grid import Grid
## Global variables
# Multiplier for individual array elements in integer and real arrays read by
# MODFLOW's U2DREL, U1DREL and U2DINT.
iconst = 1
# Printout flag. If >= 0 then array values read are printed in listing file.
iprn = -1
# external exceptions for users
class PackageLoadException(Exception):
"""
FloPy package load exception.
"""
def __init__(self, error, location=""):
"""Initialize exception."""
self.message = error
super().__init__(f"{error} ({location})")
class FileDataEntry:
def __init__(self, fname, unit, binflag=False, output=False, package=None):
self.fname = fname
self.unit = unit
self.binflag = binflag
self.output = output
self.package = package
class FileData:
def __init__(self):
self.file_data = []
return
def add_file(self, fname, unit, binflag=False, output=False, package=None):
ipop = []
for idx, file_data in enumerate(self.file_data):
if file_data.fname == fname or file_data.unit == unit:
ipop.append(idx)
self.file_data.append(
FileDataEntry(
fname, unit, binflag=binflag, output=output, package=package
)
)
return
class ModelInterface:
def __init__(self):
self._mg_resync = True
self._modelgrid = None
def update_modelgrid(self):
if self._modelgrid is not None:
self._modelgrid = Grid(
proj4=self._modelgrid.proj4,
xoff=self._modelgrid.xoffset,
yoff=self._modelgrid.yoffset,
angrot=self._modelgrid.angrot,
)
self._mg_resync = True
@property
@abc.abstractmethod
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
@abc.abstractmethod
def packagelist(self):
raise NotImplementedError(
"must define packagelist in child class to use this base class"
)
@property
@abc.abstractmethod
def namefile(self):
raise NotImplementedError(
"must define namefile in child class to use this base class"
)
@property
@abc.abstractmethod
def model_ws(self):
raise NotImplementedError(
"must define model_ws in child class to use this base class"
)
@property
@abc.abstractmethod
def exename(self):
raise NotImplementedError(
"must define exename in child class to use this base class"
)
@property
@abc.abstractmethod
def version(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@property
@abc.abstractmethod
def solver_tols(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@abc.abstractmethod
def export(self, f, **kwargs):
raise NotImplementedError(
"must define export in child class to use this base class"
)
@property
@abc.abstractmethod
def laytyp(self):
raise NotImplementedError(
"must define laytyp in child class to use this base class"
)
@property
@abc.abstractmethod
def hdry(self):
raise NotImplementedError(
"must define hdry in child class to use this base class"
)
@property
@abc.abstractmethod
def hnoflo(self):
raise NotImplementedError(
"must define hnoflo in child class to use this base class"
)
@property
@abc.abstractmethod
def laycbd(self):
raise NotImplementedError(
"must define laycbd in child class to use this base class"
)
@property
@abc.abstractmethod
def verbose(self):
raise NotImplementedError(
"must define verbose in child class to use this base class"
)
@abc.abstractmethod
def check(self, f=None, verbose=True, level=1):
raise NotImplementedError(
"must define check in child class to use this base class"
)
def get_package_list(self, ftype=None):
"""
Get a list of all the package names.
Parameters
----------
ftype : str
Type of package, 'RIV', 'LPF', etc.
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
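Examples
--------
Illustrative sketch; the package list shown is hypothetical:
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.get_package_list()
['DIS', 'BAS6', 'LPF', 'OC', 'PCG']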
"""
val = []
for pp in self.packagelist:
if ftype is None:
val.append(pp.name[0].upper())
elif pp.package_type.lower() == ftype:
val.append(pp.name[0].upper())
return val
def _check(self, chk, level=1):
"""
Check model data for common errors.
Parameters
----------
chk : check object
Model-level check object to which package-level results are
appended.
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
chk : check object
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
results = {}
for p in self.packagelist:
if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
results[p.name[0]] = p.check(
f=None,
verbose=False,
level=level - 1,
checktype=chk.__class__,
)
# model level checks
# solver check
if self.version in chk.solver_packages.keys():
solvers = set(chk.solver_packages[self.version]).intersection(
set(self.get_package_list())
)
if not solvers:
chk._add_to_summary(
"Error", desc="\r No solver package", package="model"
)
elif len(list(solvers)) > 1:
for s in solvers:
chk._add_to_summary(
"Error",
desc="\r Multiple solver packages",
package=s,
)
else:
chk.passed.append("Compatible solver package")
# add package check results to model level check summary
for r in results.values():
if (
r is not None and r.summary_array is not None
): # currently SFR doesn't have one
chk.summary_array = np.append(
chk.summary_array, r.summary_array
).view(np.recarray)
chk.passed += [
f"{r.package.name[0]} package: {psd}" for psd in r.passed
]
chk.summarize()
return chk
class BaseModel(ModelInterface):
"""
MODFLOW-based models base class.
Parameters
----------
modelname : str, default "modflowtest"
Name of the model, which is also used for model file names.
namefile_ext : str, default "nam"
Name file extension, without "."
exe_name : str, default "mf2k.exe"
Name of the modflow executable.
model_ws : str, optional
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
structured : bool, default True
Specify if model grid is structured (default) or unstructured.
verbose : bool, default False
Print additional information to the screen.
**kwargs : dict, optional
Used to define: ``xll``/``yll`` for the x- and y-coordinates of
the lower-left corner of the grid, ``xul``/``yul`` for the
x- and y-coordinates of the upper-left corner of the grid
(deprecated), ``rotation`` for the grid rotation (default 0.0),
``proj4_str`` for a PROJ string, and ``start_datetime`` for
model start date (default "1-1-1970").
"""
def __init__(
self,
modelname="modflowtest",
namefile_ext="nam",
exe_name="mf2k.exe",
model_ws=None,
structured=True,
verbose=False,
**kwargs,
):
"""Initialize BaseModel."""
super().__init__()
self.__name = modelname
self.namefile_ext = namefile_ext or ""
self._namefile = self.__name + "." + self.namefile_ext
self._packagelist = []
self.heading = ""
self.exe_name = exe_name
self._verbose = verbose
self.external_path = None
self.external_extension = "ref"
if model_ws is None:
model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
except:
print(
f"\n{model_ws} not valid, "
f"workspace-folder was changed to {os.getcwd()}\n"
)
model_ws = os.getcwd()
self._model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ""
# check for reference info in kwargs
# we are just carrying these until a dis package is added
xll = kwargs.pop("xll", None)
yll = kwargs.pop("yll", None)
self._xul = kwargs.pop("xul", None)
self._yul = kwargs.pop("yul", None)
self._rotation = kwargs.pop("rotation", 0.0)
self._proj4_str = kwargs.pop("proj4_str", None)
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
# build model discretization objects
self._modelgrid = Grid(
proj4=self._proj4_str,
xoff=xll,
yoff=yll,
angrot=self._rotation,
)
self._modeltime = None
# Model file information
self.__onunit__ = 10
# external option stuff
self.array_free_format = True
self.free_format_input = True
self.parameter_load = False
self.array_format = None
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external_output = []
self.package_units = []
self._next_ext_unit = None
# output files
self.output_fnames = []
self.output_units = []
self.output_binflag = []
self.output_packages = []
return
@property
def modeltime(self):
raise NotImplementedError(
"must define modeltime in child class to use this base class"
)
@property
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
def packagelist(self):
return self._packagelist
@packagelist.setter
def packagelist(self, packagelist):
self._packagelist = packagelist
@property
def namefile(self):
return self._namefile
@namefile.setter
def namefile(self, namefile):
self._namefile = namefile
@property
def model_ws(self):
return self._model_ws
@model_ws.setter
def model_ws(self, model_ws):
self._model_ws = model_ws
@property
def exename(self):
return self._exename
@exename.setter
def exename(self, exename):
self._exename = exename
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, verbose):
self._verbose = verbose
@property
def laytyp(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").laytyp.array
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").laycon.array
if self.get_package("UPW") is not None:
return self.get_package("UPW").laytyp.array
return None
@property
def hdry(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").hdry
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").hdry
if self.get_package("UPW") is not None:
return self.get_package("UPW").hdry
return None
@property
def hnoflo(self):
try:
bas6 = self.get_package("BAS6")
return bas6.hnoflo
except AttributeError:
return None
@property
def laycbd(self):
try:
dis = self.get_package("DIS")
return dis.laycbd.array
except AttributeError:
return None
# we don't need these - no need for controlled access to array_free_format
# def set_free_format(self, value=True):
# """
# Set the free format flag for the model instance
#
# Parameters
# ----------
# value : bool
# Boolean value to set free format flag for model. (default is True)
#
# Returns
# -------
#
# """
# if not isinstance(value, bool):
# print('Error: set_free_format passed value must be a boolean')
# return False
# self.array_free_format = value
#
# def get_free_format(self):
# """
# Return the free format flag for the model
#
# Returns
# -------
# out : bool
# Free format flag for the model
#
# """
# return self.array_free_format
def next_unit(self, i=None):
if i is not None:
self.__onunit__ = i - 1
else:
self.__onunit__ += 1
return self.__onunit__
def next_ext_unit(self):
"""
Function to encapsulate next_ext_unit attribute
"""
next_unit = self._next_ext_unit + 1
self._next_ext_unit += 1
return next_unit
def export(self, f, **kwargs):
"""
Method to export a model to netcdf or shapefile based on the
extension of the file name (.shp for shapefile, .nc for netcdf)
Parameters
----------
f : str
filename
kwargs : keyword arguments
modelgrid : flopy.discretization.Grid instance
user supplied modelgrid which can be used for exporting
in lieu of the modelgrid associated with the model object
Returns
-------
None or Netcdf object
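Examples
--------
Hypothetical sketch; the output file name is illustrative:
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.export('model.shp')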
"""
from .export import utils
return utils.model_export(f, self, **kwargs)
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
"""
for idx, u in enumerate(p.unit_number):
if u != 0:
if u in self.package_units or u in self.external_units:
try:
pn = p.name[idx]
except:
pn = p.name
if self.verbose:
print(
f"\nWARNING:\n unit {u} of package {pn} already in use."
)
self.package_units.append(u)
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif isinstance(p, type(pp)):
if self.verbose:
print(
"\nWARNING:\n Two packages of the same type, "
f"Replacing existing '{p.name[0]}' package."
)
self.packagelist[i] = p
return
if self.verbose:
print("adding Package: ", p.name[0])
self.packagelist.append(p)
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
"""
for i, pp in enumerate(self.packagelist):
if pname.upper() in pp.name:
if self.verbose:
print("removing Package: ", pp.name)
# Remove the package object from the model's packagelist
p = self.packagelist.pop(i)
# Remove the package unit number from the list of package
# units stored with the model
for iu in p.unit_number:
if iu in self.package_units:
self.package_units.remove(iu)
return
raise StopIteration(
"Package name " + pname + " not found in Package list"
)
def __getattr__(self, item):
"""
__getattr__ - syntactic sugar
Parameters
----------
item : str
3 character package name (case insensitive) or "sr" to access
the SpatialReference instance of the ModflowDis object
Returns
-------
sr : SpatialReference instance
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
Note
----
if self.dis is not None, then the spatial reference instance is updated
using self.dis.delr, self.dis.delc, and self.dis.lenuni before being
returned
"""
if item == "output_packages" or not hasattr(self, "output_packages"):
raise AttributeError(item)
if item == "tr":
if self.dis is not None:
return self.dis.tr
else:
return None
if item == "nper":
if self.dis is not None:
return self.dis.nper
else:
return 0
if item == "start_datetime":
if self.dis is not None:
return self.dis.start_datetime
else:
return None
# return self.get_package(item)
# to avoid infinite recursion
if item == "_packagelist" or item == "packagelist":
raise AttributeError(item)
pckg = self.get_package(item)
if pckg is not None or item in self.mfnam_packages:
return pckg
if item == "modelgrid":
return
raise AttributeError(item)
def get_ext_dict_attr(
self, ext_unit_dict=None, unit=None, filetype=None, pop_key=True
):
iu = None
fname = None
if ext_unit_dict is not None:
for key, value in ext_unit_dict.items():
if key == unit:
iu = key
fname = os.path.basename(value.filename)
break
elif value.filetype == filetype:
iu = key
fname = os.path.basename(value.filename)
if pop_key:
self.add_pop_key_list(iu)
break
return iu, fname
def _output_msg(self, i, add=True):
if add:
txt1 = "Adding"
txt2 = "to"
else:
txt1 = "Removing"
txt2 = "from"
print(
f"{txt1} {self.output_fnames[i]} (unit={self.output_units[i]}) "
f"{txt2} the output list."
)
def add_output_file(
self, unit, fname=None, extension="cbc", binflag=True, package=None
):
"""
Add an ascii or binary output file for a package
Parameters
----------
unit : int
unit number of external array
fname : str
filename of external array. (default is None)
extension : str
extension to use for the cell-by-cell file. Only used if fname
is None. (default is cbc)
binflag : bool
boolean flag indicating if the output file is a binary file.
Default is True
package : str
string that defines the package the output file is attached to.
Default is None
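Examples
--------
Hypothetical sketch (assumes an existing model ``m``) registering a
binary cell-by-cell budget file on unit 53:
>>> m.add_output_file(53, extension='cbc', binflag=True, package='LPF')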
"""
add_cbc = False
if unit > 0:
add_cbc = True
# determine if the file is in external_units
if abs(unit) in self.external_units:
idx = self.external_units.index(abs(unit))
if fname is None:
fname = os.path.basename(self.external_fnames[idx])
binflag = self.external_binflag[idx]
self.remove_external(unit=abs(unit))
# determine if the unit exists in the output data
if abs(unit) in self.output_units:
add_cbc = False
idx = self.output_units.index(abs(unit))
# determine if binflag has changed
if binflag is not self.output_binflag[idx]:
add_cbc = True
if add_cbc:
self.remove_output(unit=abs(unit))
else:
if package is not None:
self.output_packages[idx].append(package)
if add_cbc:
if fname is None:
fname = f"{self.name}.{extension}"
# check if this file name exists for a different unit number
if fname in self.output_fnames:
idx = self.output_fnames.index(fname)
iut = self.output_units[idx]
if iut != unit:
# include unit number in fname if package has
# not been passed
if package is None:
fname = f"{self.name}.{unit}.{extension}"
# include package name in fname
else:
fname = f"{self.name}.{package}.{extension}"
else:
fname = os.path.basename(fname)
self.add_output(fname, unit, binflag=binflag, package=package)
return
def add_output(self, fname, unit, binflag=False, package=None):
"""
Assign an output file so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.output_fnames:
if self.verbose:
print(
"BaseModel.add_output() warning: "
f"replacing existing filename {fname}"
)
idx = self.output_fnames.index(fname)
if self.verbose:
self._output_msg(idx, add=False)
self.output_fnames.pop(idx)
self.output_units.pop(idx)
self.output_binflag.pop(idx)
self.output_packages.pop(idx)
self.output_fnames.append(fname)
self.output_units.append(unit)
self.output_binflag.append(binflag)
if package is not None:
self.output_packages.append([package])
else:
self.output_packages.append([])
if self.verbose:
self._output_msg(-1, add=True)
return
def remove_output(self, fname=None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
else:
msg = " either fname or unit must be passed to remove_output()"
raise Exception(msg)
return
def get_output(self, fname=None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
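Examples
--------
Illustrative sketch (assumes an existing model ``m``; the unit number
and file name are hypothetical):
>>> fname = m.get_output(unit=53)           # file name for unit 53
>>> unit = m.get_output(fname='model.cbc')  # unit number for a file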
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
return self.output_units[i]
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
return self.output_fnames[i]
return None
else:
msg = " either fname or unit must be passed to get_output()"
raise Exception(msg)
return
def set_output_attribute(self, fname=None, unit=None, attr=None):
"""
Set a variable in an output file from the model by specifying either
the file name or the unit number and a dictionary with attributes
to change.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
attr : dict
dictionary of attributes (binflag, fname, or unit) to change
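Examples
--------
Hypothetical sketch (assumes an existing model ``m``) renaming the
output file attached to unit 53:
>>> m.set_output_attribute(unit=53, attr={'fname': 'budget.cbc'})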
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
msg = (
" either fname or unit must be passed "
"to set_output_attribute()"
)
raise Exception(msg)
if attr is not None:
if idx is not None:
for key, value in attr.items():
if key == "binflag":
self.output_binflag[idx] = value
elif key == "fname":
self.output_fnames[idx] = value
elif key == "unit":
self.output_units[idx] = value
return
def get_output_attribute(self, fname=None, unit=None, attr=None):
"""
Get an attribute for an output file from the model by specifying either
the file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
attr : str
name of the attribute to return (binflag, fname, or unit)
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
" either fname or unit must be passed "
"to set_output_attribute()"
)
v = None
if attr is not None:
if idx is not None:
if attr == "binflag":
v = self.output_binflag[idx]
elif attr == "fname":
v = self.output_fnames[idx]
elif attr == "unit":
v = self.output_units[idx]
return v
def add_external(self, fname, unit, binflag=False, output=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
output : boolean
flag indicating whether the external file is model output.
(default is False)
"""
if fname in self.external_fnames:
if self.verbose:
print(
"BaseModel.add_external() warning: "
f"replacing existing filename {fname}"
)
idx = self.external_fnames.index(fname)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
if unit in self.external_units:
if self.verbose:
msg = f"BaseModel.add_external() warning: replacing existing unit {unit}"
print(msg)
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
self.external_output.append(output)
return
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
"""
plist = []
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
plist.append(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
plist.append(i)
else:
msg = " either fname or unit must be passed to remove_external()"
raise Exception(msg)
# remove external file
j = 0
for i in plist:
ipos = i - j
self.external_fnames.pop(ipos)
self.external_units.pop(ipos)
self.external_binflag.pop(ipos)
self.external_output.pop(ipos)
j += 1
return
def add_existing_package(
self, filename, ptype=None, copy_to_model_ws=True
):
"""
Add an existing package to a model instance.
Parameters
----------
filename : str
the name of the file to add as a package
ptype : optional
the model package type (e.g. "lpf", "wel", etc). If None,
then the file extension of the filename arg is used
copy_to_model_ws : bool
flag to copy the package file into the model_ws directory.
Returns
-------
None
"""
if ptype is None:
ptype = filename.split(".")[-1]
ptype = str(ptype).upper()
# for pak in self.packagelist:
# if ptype in pak.name:
# print("BaseModel.add_existing_package() warning: " +\
# "replacing existing package {0}".format(ptype))
class Obj:
pass
fake_package = Obj()
fake_package.write_file = lambda: None
fake_package.name = [ptype]
fake_package.extension = [filename.split(".")[-1]]
fake_package.unit_number = [self.next_ext_unit()]
if copy_to_model_ws:
base_filename = os.path.split(filename)[-1]
fake_package.file_name = [base_filename]
shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
else:
fake_package.file_name = [filename]
fake_package.allowDuplicates = True
self.add_package(fake_package)
def get_name_file_entries(self):
"""
Get a string representation of the name file.
Returns
-------
lines : str
String representation of the name file entries.
"""
lines = []
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s = f"{p.name[i]:14s} {p.unit_number[i]:5d} {p.file_name[i]}"
lines.append(s)
return "\n".join(lines) + "\n"
def has_package(self, name):
"""
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
True if package name exists, otherwise False if not found.
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for p in self.packagelist:
for pn in p.name:
if pn.upper() == name:
return True
return False
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for pp in self.packagelist:
if pp.name[0].upper() == name:
return pp
return None
def set_version(self, version):
self.version = version.lower()
# check that this is a valid model version
if self.version not in list(self.version_types.keys()):
err = (
f"Error: Unsupported model version ({self.version}). "
"Valid model versions are:"
)
for v in list(self.version_types.keys()):
err += f" {v}"
raise Exception(err)
# set namefile heading
self.heading = (
f"# Name file for {self.version_types[self.version]}, "
f"generated by Flopy version {__version__}."
)
# set heading for each package
for p in self.get_package_list():
pak = self.get_package(p)
if hasattr(pak, "heading"):
pak._generate_heading()
return None
def change_model_ws(self, new_pth=None, reset_external=False):
"""
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
the present working directory).
reset_external : bool
flag to reset external file paths so they are relative to the new
model workspace. (default is False)
Returns
-------
None
"""
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
print(f"\ncreating model workspace...\n {new_pth}")
os.makedirs(new_pth)
except OSError:
raise OSError(f"{new_pth} is not a valid workspace folder")
# line = '\n{} not valid, workspace-folder '.format(new_pth) + \
# 'was changed to {}\n'.format(os.getcwd())
# print(line)
# new_pth = os.getcwd()
# --reset the model workspace
old_pth = self._model_ws
self._model_ws = new_pth
if self.verbose:
print(f"\nchanging model workspace...\n {new_pth}")
# reset the paths for each package
for pp in self.packagelist:
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
# create the external path (if needed)
if (
hasattr(self, "external_path")
and self.external_path is not None
and not os.path.exists(
os.path.join(self._model_ws, self.external_path)
)
):
pth = os.path.join(self._model_ws, self.external_path)
os.makedirs(pth)
if reset_external:
self._reset_external(pth, old_pth)
elif reset_external:
self._reset_external(self._model_ws, old_pth)
return None
def _reset_external(self, pth, old_pth):
new_ext_fnames = []
for ext_file, output in zip(
self.external_fnames, self.external_output
):
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
# this is a wicked mess
if output:
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
new_ext_file = ext_file
else:
# fpth = os.path.abspath(os.path.join(old_pth, ext_file))
# new_ext_file = os.path.relpath(fpth, os.path.abspath(pth))
fdir = os.path.dirname(ext_file)
if fdir == "":
fpth = os.path.abspath(os.path.join(old_pth, ext_file))
else:
fpth = ext_file
ao = os.path.abspath(os.path.dirname(fpth))
ep = os.path.abspath(pth)
relp = os.path.relpath(ao, ep)
new_ext_file = os.path.join(relp, os.path.basename(ext_file))
new_ext_fnames.append(new_ext_file)
self.external_fnames = new_ext_fnames
@property
def model_ws(self):
return copy.deepcopy(self._model_ws)
def _set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = str(value)
self.namefile = self.__name + "." + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + "." + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
def __setattr__(self, key, value):
if key == "free_format_input":
# if self.bas6 is not None:
# self.bas6.ifrefm = value
super().__setattr__(key, value)
elif key == "name":
self._set_name(value)
elif key == "model_ws":
self.change_model_ws(value)
elif key == "sr" and value.__class__.__name__ == "SpatialReference":
warnings.warn(
"SpatialReference has been deprecated.",
category=DeprecationWarning,
)
if self.dis is not None:
self.dis.sr = value
else:
raise Exception(
"cannot set SpatialReference - ModflowDis not found"
)
elif key == "tr":
assert isinstance(
value, discretization.reference.TemporalReference
)
if self.dis is not None:
self.dis.tr = value
else:
raise Exception(
"cannot set TemporalReference - ModflowDis not found"
)
elif key == "start_datetime":
if self.dis is not None:
self.dis.start_datetime = value
self.tr.start_datetime = value
else:
raise Exception(
"cannot set start_datetime - ModflowDis not found"
)
else:
super().__setattr__(key, value)
def run_model(
self,
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
):
"""
This method will run the model using subprocess.Popen.
Parameters
----------
silent : boolean
Suppress echo of run information to the screen (default is False).
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) that is returned
by the method. (default is False).
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
return run_model(
self.exe_name,
self.namefile,
model_ws=self.model_ws,
silent=silent,
pause=pause,
report=report,
normal_msg=normal_msg,
)
def load_results(self):
print("load_results not implemented")
return None
def write_input(self, SelPackList=False, check=False):
"""
Write the input.
Parameters
----------
SelPackList : False or list of package names
List of package names to write. If False (the default), all
packages are written.
check : bool
flag to run the model check prior to writing input. (default is
False)
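Examples
--------
A minimal sketch (assumes an existing model ``m``; the package name is
illustrative):
>>> m.write_input()                     # write every package
>>> m.write_input(SelPackList=['WEL'])  # write only the WEL package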
"""
if check:
# run check prior to writing input
self.check(f=f"{self.name}.chk", verbose=self.verbose, level=1)
# reset the model to free_format if parameter substitution was
# performed on a model load
if self.parameter_load and not self.free_format_input:
if self.verbose:
print(
"\nResetting free_format_input to True to "
"preserve the precision of the parameter data."
)
self.free_format_input = True
if self.verbose:
print("\nWriting packages:")
if SelPackList is False:
for p in self.packagelist:
if self.verbose:
print(" Package: ", p.name[0])
# prevent individual package checks from running after
# model-level package check above
# otherwise checks are run twice
# or the model level check procedure would have to be split up
# or each package would need a check argument,
# or default for package level check would have to be False
try:
p.write_file(check=False)
except TypeError:
p.write_file()
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(" Package: ", p.name[0])
try:
p.write_file(check=False)
except TypeError:
p.write_file()
break
if self.verbose:
print(" ")
# write name file
self.write_name_file()
# os.chdir(org_dir)
return
def write_name_file(self):
"""
Every Package needs its own writenamefile function
"""
raise Exception(
"IMPLEMENTATION ERROR: writenamefile must be overloaded"
)
def set_model_units(self):
"""
Every model needs its own set_model_units method
"""
raise Exception(
"IMPLEMENTATION ERROR: set_model_units must be overloaded"
)
@property
def name(self):
"""
Get model name
Returns
-------
name : str
name of model
"""
return copy.deepcopy(self.__name)
def add_pop_key_list(self, key):
"""
Add an external file unit number to a list that will be used to remove
model output (typically binary) files from ext_unit_dict.
Parameters
----------
key : int
file unit number
Returns
-------
Examples
--------
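Illustrative sketch (assumes an existing model ``m``; unit 53 is
hypothetical):
>>> m.add_pop_key_list(53)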
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
chk : check object
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
chk = utils.check(self, f=f, verbose=verbose, level=level)
# check for unit number conflicts
package_units = {}
duplicate_units = {}
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] != 0:
if p.unit_number[i] in package_units.values():
duplicate_units[p.name[i]] = p.unit_number[i]
otherpackage = [
k
for k, v in package_units.items()
if v == p.unit_number[i]
][0]
duplicate_units[otherpackage] = p.unit_number[i]
if len(duplicate_units) > 0:
for k, v in duplicate_units.items():
chk._add_to_summary(
"Error", package=k, value=v, desc="unit number conflict"
)
else:
chk.passed.append("Unit number conflicts")
return self._check(chk, level)
def plot(self, SelPackList=None, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
model input data
Parameters
----------
SelPackList : bool or list
List of packages to plot. If SelPackList=None all packages
are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
MODFLOW zero-based layer number to return. If None, then all
layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return.
(default is zero)
key : str
MfList dictionary key. (default is None)
Returns
-------
axes : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis are returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_model_helper(
self, SelPackList=SelPackList, **kwargs
)
return axes
def to_shapefile(self, filename, package_names=None, **kwargs):
"""
Wrapper function for writing a shapefile for the model grid. If
package_names is not None, then search through the requested packages
looking for arrays that can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> m.to_shapefile('model.shp', package_names=['dis', 'lpf'])
"""
warnings.warn("to_shapefile() is deprecated. use .export()")
self.export(filename, package_names=package_names)
return
def run_model(
exe_name,
namefile,
model_ws="./",
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
use_async=False,
cargs=None,
):
"""
This function will run the model using subprocess.Popen. It
communicates with the model's stdout asynchronously and reports
progress to the screen with timestamps
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
Name file of the model to run. This must be the file name only,
without any path. It can be None for programs that do not require
a control (name) file to be passed as a command line argument.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
silent : boolean
Suppress echo of run information to the screen (default is False).
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) that is returned
by the function. (default is False).
normal_msg : str or list
Normal termination message used to determine if the
run terminated normally. More than one message can be provided using
a list. (Default is 'normal termination')
use_async : boolean
Asynchronously read model stdout and report with timestamps. Useful
for models that take a long time to run; not recommended for models
that run very quickly.
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
success = False
buff = []
# convert normal_msg to a list of lower case str for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg]
for idx, s in enumerate(normal_msg):
normal_msg[idx] = s.lower()
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
import platform
if platform.system() in "Windows":
if not exe_name.lower().endswith(".exe"):
exe = which(exe_name + ".exe")
elif exe_name.lower().endswith(".exe"):
exe = which(exe_name[:-4])
if exe is None:
raise Exception(
f"The program {exe_name} does not exist or is not executable."
)
else:
if not silent:
print(
f"FloPy is using the following executable to run the model: {exe}"
)
if namefile is not None:
if not os.path.isfile(os.path.join(model_ws, namefile)):
raise Exception(
f"The namefile for this model does not exists: {namefile}"
)
# simple little function for the thread to target
def q_output(output, q):
for line in iter(output.readline, b""):
q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe_name]
if namefile is not None:
argv.append(namefile)
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
if not use_async:
while True:
line = proc.stdout.readline().decode("utf-8")
if line == "" and proc.poll() is not None:
break
if line:
for msg in normal_msg:
if msg in line.lower():
success = True
break
line = line.rstrip("\r\n")
if not silent:
print(line)
if report:
buff.append(line)
else:
break
return success, buff
# some tricks for the async stdout reading
q = Queue.Queue()
thread = threading.Thread(target=q_output, args=(proc.stdout, q))
thread.daemon = True
thread.start()
failed_words = ["fail", "error"]
last = datetime.now()
lastsec = 0.0
while True:
try:
line = q.get_nowait()
except Queue.Empty:
pass
else:
if line == "":
break
line = line.decode().lower().strip()
if line != "":
now = datetime.now()
dt = now - last
tsecs = dt.total_seconds() - lastsec
line = f"(elapsed:{tsecs})-->{line}"
lastsec = tsecs + lastsec
buff.append(line)
if not silent:
print(line)
for fword in failed_words:
if fword in line:
success = False
break
if proc.poll() is not None:
break
proc.wait()
thread.join(timeout=1)
buff.extend(proc.stdout.readlines())
proc.stdout.close()
for line in buff:
for msg in normal_msg:
if msg in line.lower():
print("success")
success = True
break
if pause:
input("Press Enter to continue...")
return success, buff
|
add_output_file
|
Add an ascii or binary output file for a package
Parameters
----------
unit : int
    unit number of the output file
fname : str
    filename of the output file. (default is None)
extension : str
extension to use for the cell-by-cell file. Only used if fname
is None. (default is cbc)
binflag : bool
boolean flag indicating if the output file is a binary file.
Default is True
package : str
string that defines the package the output file is attached to.
Default is None
|
"""
mbase module
This module contains the base model class from which
all of the other models inherit.
"""
import abc
import os
import shutil
import threading
import warnings
import queue as Queue
from datetime import datetime
from shutil import which
from subprocess import Popen, PIPE, STDOUT
import copy
import numpy as np
from flopy import utils, discretization
from .version import __version__
from .discretization.grid import Grid
## Global variables
# Multiplier for individual array elements in integer and real arrays read by
# MODFLOW's U2DREL, U1DREL and U2DINT.
iconst = 1
# Printout flag. If >= 0 then array values read are printed in listing file.
iprn = -1
# external exceptions for users
class PackageLoadException(Exception):
"""
FloPy package load exception.
"""
def __init__(self, error, location=""):
"""Initialize exception."""
self.message = error
super().__init__(f"{error} ({location})")
class FileDataEntry:
def __init__(self, fname, unit, binflag=False, output=False, package=None):
self.fname = fname
self.unit = unit
self.binflag = binflag
self.output = output
self.package = package
class FileData:
def __init__(self):
self.file_data = []
return
def add_file(self, fname, unit, binflag=False, output=False, package=None):
ipop = []
for idx, file_data in enumerate(self.file_data):
if file_data.fname == fname or file_data.unit == unit:
ipop.append(idx)
self.file_data.append(
FileDataEntry(
fname, unit, binflag=binflag, output=output, package=package
)
)
return
class ModelInterface:
def __init__(self):
self._mg_resync = True
self._modelgrid = None
def update_modelgrid(self):
if self._modelgrid is not None:
self._modelgrid = Grid(
proj4=self._modelgrid.proj4,
xoff=self._modelgrid.xoffset,
yoff=self._modelgrid.yoffset,
angrot=self._modelgrid.angrot,
)
self._mg_resync = True
@property
@abc.abstractmethod
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
@abc.abstractmethod
def packagelist(self):
raise NotImplementedError(
"must define packagelist in child class to use this base class"
)
@property
@abc.abstractmethod
def namefile(self):
raise NotImplementedError(
"must define namefile in child class to use this base class"
)
@property
@abc.abstractmethod
def model_ws(self):
raise NotImplementedError(
"must define model_ws in child class to use this base class"
)
@property
@abc.abstractmethod
def exename(self):
raise NotImplementedError(
"must define exename in child class to use this base class"
)
@property
@abc.abstractmethod
def version(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@property
@abc.abstractmethod
def solver_tols(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@abc.abstractmethod
def export(self, f, **kwargs):
raise NotImplementedError(
"must define export in child class to use this base class"
)
@property
@abc.abstractmethod
def laytyp(self):
raise NotImplementedError(
"must define laytyp in child class to use this base class"
)
@property
@abc.abstractmethod
def hdry(self):
raise NotImplementedError(
"must define hdry in child class to use this base class"
)
@property
@abc.abstractmethod
def hnoflo(self):
raise NotImplementedError(
"must define hnoflo in child class to use this base class"
)
@property
@abc.abstractmethod
def laycbd(self):
raise NotImplementedError(
"must define laycbd in child class to use this base class"
)
@property
@abc.abstractmethod
def verbose(self):
raise NotImplementedError(
"must define verbose in child class to use this base class"
)
@abc.abstractmethod
def check(self, f=None, verbose=True, level=1):
raise NotImplementedError(
"must define check in child class to use this base class"
)
def get_package_list(self, ftype=None):
"""
Get a list of all the package names.
Parameters
----------
ftype : str
Type of package, 'RIV', 'LPF', etc.
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
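        Examples
        --------
        A minimal sketch; the model file name is illustrative.
        >>> import flopy
        >>> m = flopy.modflow.Modflow.load('model.nam')
        >>> paks = m.get_package_list()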
"""
val = []
for pp in self.packagelist:
if ftype is None:
val.append(pp.name[0].upper())
elif pp.package_type.lower() == ftype:
val.append(pp.name[0].upper())
return val
def _check(self, chk, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
summarize : bool
Boolean flag used to determine if summary of results is written
to the screen
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
results = {}
for p in self.packagelist:
if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
results[p.name[0]] = p.check(
f=None,
verbose=False,
level=level - 1,
checktype=chk.__class__,
)
# model level checks
# solver check
if self.version in chk.solver_packages.keys():
solvers = set(chk.solver_packages[self.version]).intersection(
set(self.get_package_list())
)
if not solvers:
chk._add_to_summary(
"Error", desc="\r No solver package", package="model"
)
elif len(list(solvers)) > 1:
for s in solvers:
chk._add_to_summary(
"Error",
desc="\r Multiple solver packages",
package=s,
)
else:
chk.passed.append("Compatible solver package")
# add package check results to model level check summary
for r in results.values():
if (
r is not None and r.summary_array is not None
): # currently SFR doesn't have one
chk.summary_array = np.append(
chk.summary_array, r.summary_array
).view(np.recarray)
chk.passed += [
f"{r.package.name[0]} package: {psd}" for psd in r.passed
]
chk.summarize()
return chk
class BaseModel(ModelInterface):
"""
MODFLOW-based models base class.
Parameters
----------
modelname : str, default "modflowtest"
Name of the model, which is also used for model file names.
namefile_ext : str, default "nam"
Name file extension, without "."
exe_name : str, default "mf2k.exe"
Name of the modflow executable.
model_ws : str, optional
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
structured : bool, default True
Specify if model grid is structured (default) or unstructured.
verbose : bool, default False
Print additional information to the screen.
**kwargs : dict, optional
Used to define: ``xll``/``yll`` for the x- and y-coordinates of
the lower-left corner of the grid, ``xul``/``yul`` for the
x- and y-coordinates of the upper-left corner of the grid
(deprecated), ``rotation`` for the grid rotation (default 0.0),
``proj4_str`` for a PROJ string, and ``start_datetime`` for
model start date (default "1-1-1970").
"""
def __init__(
self,
modelname="modflowtest",
namefile_ext="nam",
exe_name="mf2k.exe",
model_ws=None,
structured=True,
verbose=False,
**kwargs,
):
"""Initialize BaseModel."""
super().__init__()
self.__name = modelname
self.namefile_ext = namefile_ext or ""
self._namefile = self.__name + "." + self.namefile_ext
self._packagelist = []
self.heading = ""
self.exe_name = exe_name
self._verbose = verbose
self.external_path = None
self.external_extension = "ref"
if model_ws is None:
model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
except:
print(
f"\n{model_ws} not valid, "
f"workspace-folder was changed to {os.getcwd()}\n"
)
model_ws = os.getcwd()
self._model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ""
# check for reference info in kwargs
# we are just carrying these until a dis package is added
xll = kwargs.pop("xll", None)
yll = kwargs.pop("yll", None)
self._xul = kwargs.pop("xul", None)
self._yul = kwargs.pop("yul", None)
self._rotation = kwargs.pop("rotation", 0.0)
self._proj4_str = kwargs.pop("proj4_str", None)
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
# build model discretization objects
self._modelgrid = Grid(
proj4=self._proj4_str,
xoff=xll,
yoff=yll,
angrot=self._rotation,
)
self._modeltime = None
# Model file information
self.__onunit__ = 10
# external option stuff
self.array_free_format = True
self.free_format_input = True
self.parameter_load = False
self.array_format = None
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external_output = []
self.package_units = []
self._next_ext_unit = None
# output files
self.output_fnames = []
self.output_units = []
self.output_binflag = []
self.output_packages = []
return
@property
def modeltime(self):
raise NotImplementedError(
"must define modeltime in child class to use this base class"
)
@property
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
def packagelist(self):
return self._packagelist
@packagelist.setter
def packagelist(self, packagelist):
self._packagelist = packagelist
@property
def namefile(self):
return self._namefile
@namefile.setter
def namefile(self, namefile):
self._namefile = namefile
@property
def model_ws(self):
return self._model_ws
@model_ws.setter
def model_ws(self, model_ws):
self._model_ws = model_ws
@property
def exename(self):
return self._exename
@exename.setter
def exename(self, exename):
self._exename = exename
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, verbose):
self._verbose = verbose
@property
def laytyp(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").laytyp.array
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").laycon.array
if self.get_package("UPW") is not None:
return self.get_package("UPW").laytyp.array
return None
@property
def hdry(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").hdry
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").hdry
if self.get_package("UPW") is not None:
return self.get_package("UPW").hdry
return None
@property
def hnoflo(self):
try:
bas6 = self.get_package("BAS6")
return bas6.hnoflo
except AttributeError:
return None
@property
def laycbd(self):
try:
dis = self.get_package("DIS")
return dis.laycbd.array
except AttributeError:
return None
# we don't need these - no need for controlled access to array_free_format
# def set_free_format(self, value=True):
# """
# Set the free format flag for the model instance
#
# Parameters
# ----------
# value : bool
# Boolean value to set free format flag for model. (default is True)
#
# Returns
# -------
#
# """
# if not isinstance(value, bool):
# print('Error: set_free_format passed value must be a boolean')
# return False
# self.array_free_format = value
#
# def get_free_format(self):
# """
# Return the free format flag for the model
#
# Returns
# -------
# out : bool
# Free format flag for the model
#
# """
# return self.array_free_format
def next_unit(self, i=None):
if i is not None:
self.__onunit__ = i - 1
else:
self.__onunit__ += 1
return self.__onunit__
def next_ext_unit(self):
"""
Function to encapsulate next_ext_unit attribute
"""
next_unit = self._next_ext_unit + 1
self._next_ext_unit += 1
return next_unit
def export(self, f, **kwargs):
"""
Method to export a model to netcdf or shapefile based on the
extension of the file name (.shp for shapefile, .nc for netcdf)
Parameters
----------
f : str
filename
kwargs : keyword arguments
modelgrid : flopy.discretization.Grid instance
user supplied modelgrid which can be used for exporting
in lieu of the modelgrid associated with the model object
Returns
-------
None or Netcdf object
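        Examples
        --------
        A hypothetical sketch; the file names are illustrative.
        >>> m.export('model.shp')
        >>> m.export('model.nc')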
"""
from .export import utils
return utils.model_export(f, self, **kwargs)
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
"""
for idx, u in enumerate(p.unit_number):
if u != 0:
if u in self.package_units or u in self.external_units:
try:
pn = p.name[idx]
except:
pn = p.name
if self.verbose:
print(
f"\nWARNING:\n unit {u} of package {pn} already in use."
)
self.package_units.append(u)
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif isinstance(p, type(pp)):
if self.verbose:
print(
"\nWARNING:\n Two packages of the same type, "
f"Replacing existing '{p.name[0]}' package."
)
self.packagelist[i] = p
return
if self.verbose:
print("adding Package: ", p.name[0])
self.packagelist.append(p)
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
"""
for i, pp in enumerate(self.packagelist):
if pname.upper() in pp.name:
if self.verbose:
print("removing Package: ", pp.name)
# Remove the package object from the model's packagelist
p = self.packagelist.pop(i)
# Remove the package unit number from the list of package
# units stored with the model
for iu in p.unit_number:
if iu in self.package_units:
self.package_units.remove(iu)
return
raise StopIteration(
"Package name " + pname + " not found in Package list"
)
def __getattr__(self, item):
"""
__getattr__ - syntactic sugar
Parameters
----------
item : str
3 character package name (case insensitive) or "sr" to access
the SpatialReference instance of the ModflowDis object
Returns
-------
sr : SpatialReference instance
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
Note
----
if self.dis is not None, then the spatial reference instance is updated
using self.dis.delr, self.dis.delc, and self.dis.lenuni before being
returned
"""
if item == "output_packages" or not hasattr(self, "output_packages"):
raise AttributeError(item)
if item == "tr":
if self.dis is not None:
return self.dis.tr
else:
return None
if item == "nper":
if self.dis is not None:
return self.dis.nper
else:
return 0
if item == "start_datetime":
if self.dis is not None:
return self.dis.start_datetime
else:
return None
# return self.get_package(item)
# to avoid infinite recursion
if item == "_packagelist" or item == "packagelist":
raise AttributeError(item)
pckg = self.get_package(item)
if pckg is not None or item in self.mfnam_packages:
return pckg
if item == "modelgrid":
return
raise AttributeError(item)
def get_ext_dict_attr(
self, ext_unit_dict=None, unit=None, filetype=None, pop_key=True
):
iu = None
fname = None
if ext_unit_dict is not None:
for key, value in ext_unit_dict.items():
if key == unit:
iu = key
fname = os.path.basename(value.filename)
break
elif value.filetype == filetype:
iu = key
fname = os.path.basename(value.filename)
if pop_key:
self.add_pop_key_list(iu)
break
return iu, fname
def _output_msg(self, i, add=True):
if add:
txt1 = "Adding"
txt2 = "to"
else:
txt1 = "Removing"
txt2 = "from"
print(
f"{txt1} {self.output_fnames[i]} (unit={self.output_units[i]}) "
f"{txt2} the output list."
)
# MASKED: add_output_file function (lines 712-776)
def add_output(self, fname, unit, binflag=False, package=None):
"""
        Assign an output file so that it will be listed as a DATA or
        DATA(BINARY) entry in the name file. This will allow an outside
        program to refer to it.
        Parameters
        ----------
        fname : str
            filename of the output file
        unit : int
            unit number of the output file
binflag : boolean
binary or not. (default is False)
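        Examples
        --------
        A hypothetical sketch; the file name and unit number are
        illustrative.
        >>> m.add_output('model.cbc', 53, binflag=True, package='LPF')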
"""
if fname in self.output_fnames:
if self.verbose:
print(
"BaseModel.add_output() warning: "
f"replacing existing filename {fname}"
)
idx = self.output_fnames.index(fname)
if self.verbose:
self._output_msg(idx, add=False)
self.output_fnames.pop(idx)
self.output_units.pop(idx)
self.output_binflag.pop(idx)
self.output_packages.pop(idx)
self.output_fnames.append(fname)
self.output_units.append(unit)
self.output_binflag.append(binflag)
if package is not None:
self.output_packages.append([package])
else:
self.output_packages.append([])
if self.verbose:
self._output_msg(-1, add=True)
return
def remove_output(self, fname=None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
else:
msg = " either fname or unit must be passed to remove_output()"
raise Exception(msg)
return
def get_output(self, fname=None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
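        Examples
        --------
        A hypothetical sketch; the file name is illustrative.
        >>> unit = m.get_output(fname='model.cbc')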
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
return self.output_units[i]
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
return self.output_fnames[i]
return None
else:
msg = " either fname or unit must be passed to get_output()"
raise Exception(msg)
return
def set_output_attribute(self, fname=None, unit=None, attr=None):
"""
Set a variable in an output file from the model by specifying either
the file name or the unit number and a dictionary with attributes
to change.
Parameters
----------
fname : str
filename of output array
unit : int
            unit number of output array
        attr : dict
            dictionary of attribute name-value pairs to set; recognized
            keys are 'binflag', 'fname', and 'unit'
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
msg = (
" either fname or unit must be passed "
"to set_output_attribute()"
)
raise Exception(msg)
if attr is not None:
if idx is not None:
                for key, value in attr.items():
if key == "binflag":
self.output_binflag[idx] = value
elif key == "fname":
self.output_fnames[idx] = value
elif key == "unit":
self.output_units[idx] = value
return
def get_output_attribute(self, fname=None, unit=None, attr=None):
"""
        Get an attribute for an output file from the model by specifying either
the file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
            unit number of output array
        attr : str
            name of the attribute to return; one of 'binflag', 'fname',
            or 'unit'
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
" either fname or unit must be passed "
"to set_output_attribute()"
)
v = None
if attr is not None:
if idx is not None:
if attr == "binflag":
v = self.output_binflag[idx]
elif attr == "fname":
v = self.output_fnames[idx]
elif attr == "unit":
v = self.output_units[idx]
return v
def add_external(self, fname, unit, binflag=False, output=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
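        Examples
        --------
        A hypothetical sketch; the file name and unit number are
        illustrative.
        >>> m.add_external('recharge.ref', 150)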
"""
if fname in self.external_fnames:
if self.verbose:
print(
"BaseModel.add_external() warning: "
f"replacing existing filename {fname}"
)
idx = self.external_fnames.index(fname)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
if unit in self.external_units:
if self.verbose:
msg = f"BaseModel.add_external() warning: replacing existing unit {unit}"
print(msg)
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
self.external_output.append(output)
return
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
"""
plist = []
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
plist.append(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
plist.append(i)
else:
msg = " either fname or unit must be passed to remove_external()"
raise Exception(msg)
# remove external file
j = 0
for i in plist:
ipos = i - j
self.external_fnames.pop(ipos)
self.external_units.pop(ipos)
self.external_binflag.pop(ipos)
self.external_output.pop(ipos)
j += 1
return
def add_existing_package(
self, filename, ptype=None, copy_to_model_ws=True
):
"""
Add an existing package to a model instance.
Parameters
----------
filename : str
the name of the file to add as a package
ptype : optional
the model package type (e.g. "lpf", "wel", etc). If None,
then the file extension of the filename arg is used
copy_to_model_ws : bool
flag to copy the package file into the model_ws directory.
Returns
-------
None
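        Examples
        --------
        A hypothetical sketch; the file name is illustrative.
        >>> m.add_existing_package('aux_data.wel', ptype='wel')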
"""
if ptype is None:
ptype = filename.split(".")[-1]
ptype = str(ptype).upper()
# for pak in self.packagelist:
# if ptype in pak.name:
# print("BaseModel.add_existing_package() warning: " +\
# "replacing existing package {0}".format(ptype))
class Obj:
pass
fake_package = Obj()
fake_package.write_file = lambda: None
fake_package.name = [ptype]
fake_package.extension = [filename.split(".")[-1]]
fake_package.unit_number = [self.next_ext_unit()]
if copy_to_model_ws:
base_filename = os.path.split(filename)[-1]
fake_package.file_name = [base_filename]
shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
else:
fake_package.file_name = [filename]
fake_package.allowDuplicates = True
self.add_package(fake_package)
def get_name_file_entries(self):
"""
Get a string representation of the name file.
Parameters
----------
"""
lines = []
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s = f"{p.name[i]:14s} {p.unit_number[i]:5d} {p.file_name[i]}"
lines.append(s)
return "\n".join(lines) + "\n"
def has_package(self, name):
"""
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
True if package name exists, otherwise False if not found.
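        Examples
        --------
        A minimal sketch:
        >>> m.has_package('DIS')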
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for p in self.packagelist:
for pn in p.name:
if pn.upper() == name:
return True
return False
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
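        Examples
        --------
        A minimal sketch:
        >>> dis = m.get_package('DIS')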
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for pp in self.packagelist:
if pp.name[0].upper() == name:
return pp
return None
def set_version(self, version):
self.version = version.lower()
# check that this is a valid model version
if self.version not in list(self.version_types.keys()):
err = (
f"Error: Unsupported model version ({self.version}). "
"Valid model versions are:"
)
for v in list(self.version_types.keys()):
err += f" {v}"
raise Exception(err)
# set namefile heading
self.heading = (
f"# Name file for {self.version_types[self.version]}, "
f"generated by Flopy version {__version__}."
)
# set heading for each package
for p in self.get_package_list():
pak = self.get_package(p)
if hasattr(pak, "heading"):
pak._generate_heading()
return None
def change_model_ws(self, new_pth=None, reset_external=False):
"""
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
the present working directory).
        reset_external : bool
            Reset external file paths to be relative to the new model
            workspace. (default is False)
        Returns
        -------
        None
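        Examples
        --------
        A hypothetical sketch; the path is illustrative.
        >>> m.change_model_ws('run01', reset_external=True)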
"""
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
print(f"\ncreating model workspace...\n {new_pth}")
os.makedirs(new_pth)
except:
raise OSError(f"{new_pth} not valid, workspace-folder")
# line = '\n{} not valid, workspace-folder '.format(new_pth) + \
# 'was changed to {}\n'.format(os.getcwd())
# print(line)
# new_pth = os.getcwd()
# --reset the model workspace
old_pth = self._model_ws
self._model_ws = new_pth
if self.verbose:
print(f"\nchanging model workspace...\n {new_pth}")
# reset the paths for each package
for pp in self.packagelist:
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
# create the external path (if needed)
if (
hasattr(self, "external_path")
and self.external_path is not None
and not os.path.exists(
os.path.join(self._model_ws, self.external_path)
)
):
pth = os.path.join(self._model_ws, self.external_path)
os.makedirs(pth)
if reset_external:
self._reset_external(pth, old_pth)
elif reset_external:
self._reset_external(self._model_ws, old_pth)
return None
def _reset_external(self, pth, old_pth):
new_ext_fnames = []
for ext_file, output in zip(
self.external_fnames, self.external_output
):
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
# this is a wicked mess
if output:
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
new_ext_file = ext_file
else:
# fpth = os.path.abspath(os.path.join(old_pth, ext_file))
# new_ext_file = os.path.relpath(fpth, os.path.abspath(pth))
fdir = os.path.dirname(ext_file)
if fdir == "":
fpth = os.path.abspath(os.path.join(old_pth, ext_file))
else:
fpth = ext_file
ao = os.path.abspath(os.path.dirname(fpth))
ep = os.path.abspath(pth)
relp = os.path.relpath(ao, ep)
new_ext_file = os.path.join(relp, os.path.basename(ext_file))
new_ext_fnames.append(new_ext_file)
self.external_fnames = new_ext_fnames
@property
def model_ws(self):
return copy.deepcopy(self._model_ws)
def _set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = str(value)
self.namefile = self.__name + "." + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + "." + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
def __setattr__(self, key, value):
if key == "free_format_input":
# if self.bas6 is not None:
# self.bas6.ifrefm = value
super().__setattr__(key, value)
elif key == "name":
self._set_name(value)
elif key == "model_ws":
self.change_model_ws(value)
elif key == "sr" and value.__class__.__name__ == "SpatialReference":
warnings.warn(
"SpatialReference has been deprecated.",
category=DeprecationWarning,
)
if self.dis is not None:
self.dis.sr = value
else:
raise Exception(
"cannot set SpatialReference - ModflowDis not found"
)
elif key == "tr":
assert isinstance(
value, discretization.reference.TemporalReference
)
if self.dis is not None:
self.dis.tr = value
else:
raise Exception(
"cannot set TemporalReference - ModflowDis not found"
)
elif key == "start_datetime":
if self.dis is not None:
self.dis.start_datetime = value
self.tr.start_datetime = value
else:
raise Exception(
"cannot set start_datetime - ModflowDis not found"
)
else:
super().__setattr__(key, value)
def run_model(
self,
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
):
"""
This method will run the model using subprocess.Popen.
Parameters
----------
silent : boolean
            If True, suppress echoing run information to the screen.
            (default is False)
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
            by the method. (default is False).
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
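        Examples
        --------
        A minimal sketch:
        >>> success, buff = m.run_model(report=True)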
"""
return run_model(
self.exe_name,
self.namefile,
model_ws=self.model_ws,
silent=silent,
pause=pause,
report=report,
normal_msg=normal_msg,
)
def load_results(self):
print("load_results not implemented")
return None
def write_input(self, SelPackList=False, check=False):
"""
Write the input.
Parameters
----------
        SelPackList : False or list of packages
            If False (default), write input for all packages; otherwise
            write input only for the packages named in the list.
        check : bool
            Run the model check method before writing input files.
            (default is False)
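        Examples
        --------
        A minimal sketch; the package names are illustrative.
        >>> m.write_input()
        >>> m.write_input(SelPackList=['WEL', 'RIV'])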
"""
if check:
# run check prior to writing input
self.check(f=f"{self.name}.chk", verbose=self.verbose, level=1)
# reset the model to free_format if parameter substitution was
# performed on a model load
if self.parameter_load and not self.free_format_input:
if self.verbose:
print(
"\nResetting free_format_input to True to "
"preserve the precision of the parameter data."
)
self.free_format_input = True
if self.verbose:
print("\nWriting packages:")
if SelPackList == False:
for p in self.packagelist:
if self.verbose:
print(" Package: ", p.name[0])
# prevent individual package checks from running after
# model-level package check above
# otherwise checks are run twice
# or the model level check procedure would have to be split up
# or each package would need a check argument,
# or default for package level check would have to be False
try:
p.write_file(check=False)
except TypeError:
p.write_file()
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(" Package: ", p.name[0])
try:
p.write_file(check=False)
except TypeError:
p.write_file()
break
if self.verbose:
print(" ")
# write name file
self.write_name_file()
# os.chdir(org_dir)
return
def write_name_file(self):
"""
        Every model needs its own write_name_file method
"""
raise Exception(
"IMPLEMENTATION ERROR: writenamefile must be overloaded"
)
def set_model_units(self):
"""
Every model needs its own set_model_units method
"""
raise Exception(
"IMPLEMENTATION ERROR: set_model_units must be overloaded"
)
@property
def name(self):
"""
Get model name
Returns
-------
name : str
name of model
"""
return copy.deepcopy(self.__name)
def add_pop_key_list(self, key):
"""
        Add an external file unit number to a list that will be used to remove
model output (typically binary) files from ext_unit_dict.
Parameters
----------
key : int
file unit number
Returns
-------
Examples
--------
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
chk = utils.check(self, f=f, verbose=verbose, level=level)
# check for unit number conflicts
package_units = {}
duplicate_units = {}
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] != 0:
if p.unit_number[i] in package_units.values():
duplicate_units[p.name[i]] = p.unit_number[i]
otherpackage = [
k
for k, v in package_units.items()
if v == p.unit_number[i]
][0]
duplicate_units[otherpackage] = p.unit_number[i]
if len(duplicate_units) > 0:
for k, v in duplicate_units.items():
chk._add_to_summary(
"Error", package=k, value=v, desc="unit number conflict"
)
else:
chk.passed.append("Unit number conflicts")
return self._check(chk, level)
def plot(self, SelPackList=None, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
model input data
Parameters
----------
SelPackList : bool or list
            List of packages to plot. If SelPackList=None all packages
are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
                MODFLOW zero-based layer number to return. If None, then
                all layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return.
(default is zero)
key : str
MfList dictionary key. (default is None)
Returns
        -------
axes : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis are returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_model_helper(
self, SelPackList=SelPackList, **kwargs
)
return axes
def to_shapefile(self, filename, package_names=None, **kwargs):
"""
Wrapper function for writing a shapefile for the model grid. If
package_names is not None, then search through the requested packages
looking for arrays that can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
        >>> m.to_shapefile('model.shp', package_names=['dis', 'lpf'])
"""
warnings.warn("to_shapefile() is deprecated. use .export()")
self.export(filename, package_names=package_names)
return
def run_model(
exe_name,
namefile,
model_ws="./",
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
use_async=False,
cargs=None,
):
"""
This function will run the model using subprocess.Popen. It
    communicates with the model's stdout (asynchronously, when use_async
    is True) and reports progress to the screen with timestamps.
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
        Namefile of the model to run. This must be the filename of the
        namefile only, without any path. Namefile can be None for
        programs that do not require a control file (name file) as a
        command line argument.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
silent : boolean
        If True, suppress echoing run information to the screen.
        (default is False)
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
        by the method. (default is False).
normal_msg : str or list
Normal termination message used to determine if the
run terminated normally. More than one message can be provided using
a list. (Default is 'normal termination')
use_async : boolean
        Asynchronously read model stdout and report with timestamps.
        Good for models that take a long time to run; not recommended
        for models that run very quickly. (default is False)
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
success = False
buff = []
# convert normal_msg to a list of lower case str for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg]
for idx, s in enumerate(normal_msg):
normal_msg[idx] = s.lower()
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
import platform
if platform.system() in "Windows":
if not exe_name.lower().endswith(".exe"):
exe = which(exe_name + ".exe")
elif exe_name.lower().endswith(".exe"):
exe = which(exe_name[:-4])
if exe is None:
raise Exception(
f"The program {exe_name} does not exist or is not executable."
)
else:
if not silent:
print(
f"FloPy is using the following executable to run the model: {exe}"
)
if namefile is not None:
if not os.path.isfile(os.path.join(model_ws, namefile)):
raise Exception(
f"The namefile for this model does not exists: {namefile}"
)
# simple little function for the thread to target
def q_output(output, q):
for line in iter(output.readline, b""):
q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe_name]
if namefile is not None:
argv.append(namefile)
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
if not use_async:
while True:
line = proc.stdout.readline().decode("utf-8")
if line == "" and proc.poll() is not None:
break
if line:
for msg in normal_msg:
if msg in line.lower():
success = True
break
line = line.rstrip("\r\n")
if not silent:
print(line)
if report:
buff.append(line)
else:
break
return success, buff
# some tricks for the async stdout reading
q = Queue.Queue()
thread = threading.Thread(target=q_output, args=(proc.stdout, q))
thread.daemon = True
thread.start()
failed_words = ["fail", "error"]
last = datetime.now()
lastsec = 0.0
while True:
try:
line = q.get_nowait()
except Queue.Empty:
pass
else:
if line == "":
break
line = line.decode().lower().strip()
if line != "":
now = datetime.now()
dt = now - last
tsecs = dt.total_seconds() - lastsec
line = f"(elapsed:{tsecs})-->{line}"
lastsec = tsecs + lastsec
buff.append(line)
if not silent:
print(line)
for fword in failed_words:
if fword in line:
success = False
break
if proc.poll() is not None:
break
proc.wait()
thread.join(timeout=1)
buff.extend(proc.stdout.readlines())
proc.stdout.close()
for line in buff:
for msg in normal_msg:
if msg in line.lower():
print("success")
success = True
break
if pause:
input("Press Enter to continue...")
return success, buff
|
def add_output_file(
self, unit, fname=None, extension="cbc", binflag=True, package=None
):
"""
Add an ascii or binary output file for a package
Parameters
----------
        unit : int
            unit number of the output file
        fname : str
            filename of the output file. (default is None)
extension : str
extension to use for the cell-by-cell file. Only used if fname
is None. (default is cbc)
binflag : bool
boolean flag indicating if the output file is a binary file.
Default is True
package : str
string that defines the package the output file is attached to.
Default is None
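        Examples
        --------
        A hypothetical sketch; the unit number is illustrative.
        >>> m.add_output_file(53, extension='cbc', binflag=True)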
"""
add_cbc = False
if unit > 0:
add_cbc = True
# determine if the file is in external_units
if abs(unit) in self.external_units:
idx = self.external_units.index(abs(unit))
if fname is None:
fname = os.path.basename(self.external_fnames[idx])
binflag = self.external_binflag[idx]
self.remove_external(unit=abs(unit))
# determine if the unit exists in the output data
if abs(unit) in self.output_units:
add_cbc = False
idx = self.output_units.index(abs(unit))
# determine if binflag has changed
            if binflag != self.output_binflag[idx]:
add_cbc = True
if add_cbc:
self.remove_output(unit=abs(unit))
else:
if package is not None:
self.output_packages[idx].append(package)
if add_cbc:
if fname is None:
fname = f"{self.name}.{extension}"
# check if this file name exists for a different unit number
if fname in self.output_fnames:
idx = self.output_fnames.index(fname)
iut = self.output_units[idx]
if iut != unit:
# include unit number in fname if package has
# not been passed
if package is None:
fname = f"{self.name}.{unit}.{extension}"
# include package name in fname
else:
fname = f"{self.name}.{package}.{extension}"
else:
fname = os.path.basename(fname)
self.add_output(fname, unit, binflag=binflag, package=package)
return
| 712
| 776
|
"""
mbase module
This module contains the base model class from which
all of the other models inherit.
"""
import abc
import os
import shutil
import threading
import warnings
import queue as Queue
from datetime import datetime
from shutil import which
from subprocess import Popen, PIPE, STDOUT
import copy
import numpy as np
from flopy import utils, discretization
from .version import __version__
from .discretization.grid import Grid
## Global variables
# Multiplier for individual array elements in integer and real arrays read by
# MODFLOW's U2DREL, U1DREL and U2DINT.
iconst = 1
# Printout flag. If >= 0 then array values read are printed in listing file.
iprn = -1
# external exceptions for users
class PackageLoadException(Exception):
"""
FloPy package load exception.
"""
def __init__(self, error, location=""):
"""Initialize exception."""
self.message = error
super().__init__(f"{error} ({location})")
class FileDataEntry:
def __init__(self, fname, unit, binflag=False, output=False, package=None):
self.fname = fname
self.unit = unit
self.binflag = binflag
self.output = output
self.package = package
class FileData:
def __init__(self):
self.file_data = []
return
def add_file(self, fname, unit, binflag=False, output=False, package=None):
ipop = []
for idx, file_data in enumerate(self.file_data):
if file_data.fname == fname or file_data.unit == unit:
ipop.append(idx)
self.file_data.append(
FileDataEntry(
fname, unit, binflag=binflag, output=output, package=package
)
)
return
class ModelInterface:
def __init__(self):
self._mg_resync = True
self._modelgrid = None
def update_modelgrid(self):
if self._modelgrid is not None:
self._modelgrid = Grid(
proj4=self._modelgrid.proj4,
xoff=self._modelgrid.xoffset,
yoff=self._modelgrid.yoffset,
angrot=self._modelgrid.angrot,
)
self._mg_resync = True
@property
@abc.abstractmethod
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
@abc.abstractmethod
def packagelist(self):
raise NotImplementedError(
"must define packagelist in child class to use this base class"
)
@property
@abc.abstractmethod
def namefile(self):
raise NotImplementedError(
"must define namefile in child class to use this base class"
)
@property
@abc.abstractmethod
def model_ws(self):
raise NotImplementedError(
"must define model_ws in child class to use this base class"
)
@property
@abc.abstractmethod
def exename(self):
raise NotImplementedError(
"must define exename in child class to use this base class"
)
@property
@abc.abstractmethod
def version(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@property
@abc.abstractmethod
def solver_tols(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@abc.abstractmethod
def export(self, f, **kwargs):
raise NotImplementedError(
"must define export in child class to use this base class"
)
@property
@abc.abstractmethod
def laytyp(self):
raise NotImplementedError(
"must define laytyp in child class to use this base class"
)
@property
@abc.abstractmethod
def hdry(self):
raise NotImplementedError(
"must define hdry in child class to use this base class"
)
@property
@abc.abstractmethod
def hnoflo(self):
raise NotImplementedError(
"must define hnoflo in child class to use this base class"
)
@property
@abc.abstractmethod
def laycbd(self):
raise NotImplementedError(
"must define laycbd in child class to use this base class"
)
@property
@abc.abstractmethod
def verbose(self):
raise NotImplementedError(
"must define verbose in child class to use this base class"
)
@abc.abstractmethod
def check(self, f=None, verbose=True, level=1):
raise NotImplementedError(
"must define check in child class to use this base class"
)
def get_package_list(self, ftype=None):
"""
Get a list of all the package names.
Parameters
----------
ftype : str
Type of package, 'RIV', 'LPF', etc.
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
"""
val = []
for pp in self.packagelist:
if ftype is None:
val.append(pp.name[0].upper())
elif pp.package_type.lower() == ftype:
val.append(pp.name[0].upper())
return val
def _check(self, chk, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
summarize : bool
Boolean flag used to determine if summary of results is written
to the screen
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
results = {}
for p in self.packagelist:
if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
results[p.name[0]] = p.check(
f=None,
verbose=False,
level=level - 1,
checktype=chk.__class__,
)
# model level checks
# solver check
if self.version in chk.solver_packages.keys():
solvers = set(chk.solver_packages[self.version]).intersection(
set(self.get_package_list())
)
if not solvers:
chk._add_to_summary(
"Error", desc="\r No solver package", package="model"
)
elif len(list(solvers)) > 1:
for s in solvers:
chk._add_to_summary(
"Error",
desc="\r Multiple solver packages",
package=s,
)
else:
chk.passed.append("Compatible solver package")
# add package check results to model level check summary
for r in results.values():
if (
r is not None and r.summary_array is not None
): # currently SFR doesn't have one
chk.summary_array = np.append(
chk.summary_array, r.summary_array
).view(np.recarray)
chk.passed += [
f"{r.package.name[0]} package: {psd}" for psd in r.passed
]
chk.summarize()
return chk
class BaseModel(ModelInterface):
"""
MODFLOW-based models base class.
Parameters
----------
modelname : str, default "modflowtest"
Name of the model, which is also used for model file names.
namefile_ext : str, default "nam"
Name file extension, without "."
exe_name : str, default "mf2k.exe"
Name of the modflow executable.
model_ws : str, optional
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
structured : bool, default True
Specify if model grid is structured (default) or unstructured.
verbose : bool, default False
Print additional information to the screen.
**kwargs : dict, optional
Used to define: ``xll``/``yll`` for the x- and y-coordinates of
the lower-left corner of the grid, ``xul``/``yul`` for the
x- and y-coordinates of the upper-left corner of the grid
(deprecated), ``rotation`` for the grid rotation (default 0.0),
``proj4_str`` for a PROJ string, and ``start_datetime`` for
model start date (default "1-1-1970").
"""
def __init__(
self,
modelname="modflowtest",
namefile_ext="nam",
exe_name="mf2k.exe",
model_ws=None,
structured=True,
verbose=False,
**kwargs,
):
"""Initialize BaseModel."""
super().__init__()
self.__name = modelname
self.namefile_ext = namefile_ext or ""
self._namefile = self.__name + "." + self.namefile_ext
self._packagelist = []
self.heading = ""
self.exe_name = exe_name
self._verbose = verbose
self.external_path = None
self.external_extension = "ref"
if model_ws is None:
model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
except:
print(
f"\n{model_ws} not valid, "
f"workspace-folder was changed to {os.getcwd()}\n"
)
model_ws = os.getcwd()
self._model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ""
# check for reference info in kwargs
# we are just carrying these until a dis package is added
xll = kwargs.pop("xll", None)
yll = kwargs.pop("yll", None)
self._xul = kwargs.pop("xul", None)
self._yul = kwargs.pop("yul", None)
self._rotation = kwargs.pop("rotation", 0.0)
self._proj4_str = kwargs.pop("proj4_str", None)
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
# build model discretization objects
self._modelgrid = Grid(
proj4=self._proj4_str,
xoff=xll,
yoff=yll,
angrot=self._rotation,
)
self._modeltime = None
# Model file information
self.__onunit__ = 10
# external option stuff
self.array_free_format = True
self.free_format_input = True
self.parameter_load = False
self.array_format = None
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external_output = []
self.package_units = []
self._next_ext_unit = None
# output files
self.output_fnames = []
self.output_units = []
self.output_binflag = []
self.output_packages = []
return
@property
def modeltime(self):
raise NotImplementedError(
"must define modeltime in child class to use this base class"
)
@property
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
def packagelist(self):
return self._packagelist
@packagelist.setter
def packagelist(self, packagelist):
self._packagelist = packagelist
@property
def namefile(self):
return self._namefile
@namefile.setter
def namefile(self, namefile):
self._namefile = namefile
@property
def model_ws(self):
return self._model_ws
@model_ws.setter
def model_ws(self, model_ws):
self._model_ws = model_ws
@property
def exename(self):
return self._exename
@exename.setter
def exename(self, exename):
self._exename = exename
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, verbose):
self._verbose = verbose
@property
def laytyp(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").laytyp.array
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").laycon.array
if self.get_package("UPW") is not None:
return self.get_package("UPW").laytyp.array
return None
@property
def hdry(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").hdry
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").hdry
if self.get_package("UPW") is not None:
return self.get_package("UPW").hdry
return None
@property
def hnoflo(self):
try:
bas6 = self.get_package("BAS6")
return bas6.hnoflo
except AttributeError:
return None
@property
def laycbd(self):
try:
dis = self.get_package("DIS")
return dis.laycbd.array
except AttributeError:
return None
# we don't need these - no need for controlled access to array_free_format
# def set_free_format(self, value=True):
# """
# Set the free format flag for the model instance
#
# Parameters
# ----------
# value : bool
# Boolean value to set free format flag for model. (default is True)
#
# Returns
# -------
#
# """
# if not isinstance(value, bool):
# print('Error: set_free_format passed value must be a boolean')
# return False
# self.array_free_format = value
#
# def get_free_format(self):
# """
# Return the free format flag for the model
#
# Returns
# -------
# out : bool
# Free format flag for the model
#
# """
# return self.array_free_format
def next_unit(self, i=None):
if i is not None:
self.__onunit__ = i - 1
else:
self.__onunit__ += 1
return self.__onunit__
def next_ext_unit(self):
"""
Function to encapsulate next_ext_unit attribute
"""
next_unit = self._next_ext_unit + 1
self._next_ext_unit += 1
return next_unit
def export(self, f, **kwargs):
"""
Method to export a model to netcdf or shapefile based on the
extension of the file name (.shp for shapefile, .nc for netcdf)
Parameters
----------
f : str
filename
kwargs : keyword arguments
modelgrid : flopy.discretization.Grid instance
user supplied modelgrid which can be used for exporting
in lieu of the modelgrid associated with the model object
Returns
-------
None or Netcdf object
"""
from .export import utils
return utils.model_export(f, self, **kwargs)
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
"""
for idx, u in enumerate(p.unit_number):
if u != 0:
if u in self.package_units or u in self.external_units:
try:
pn = p.name[idx]
except:
pn = p.name
if self.verbose:
print(
f"\nWARNING:\n unit {u} of package {pn} already in use."
)
self.package_units.append(u)
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif isinstance(p, type(pp)):
if self.verbose:
print(
"\nWARNING:\n Two packages of the same type, "
f"Replacing existing '{p.name[0]}' package."
)
self.packagelist[i] = p
return
if self.verbose:
print("adding Package: ", p.name[0])
self.packagelist.append(p)
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
"""
for i, pp in enumerate(self.packagelist):
if pname.upper() in pp.name:
if self.verbose:
print("removing Package: ", pp.name)
# Remove the package object from the model's packagelist
p = self.packagelist.pop(i)
# Remove the package unit number from the list of package
# units stored with the model
for iu in p.unit_number:
if iu in self.package_units:
self.package_units.remove(iu)
return
raise StopIteration(
"Package name " + pname + " not found in Package list"
)
def __getattr__(self, item):
"""
__getattr__ - syntactic sugar
Parameters
----------
item : str
3 character package name (case insensitive) or "sr" to access
the SpatialReference instance of the ModflowDis object
Returns
-------
sr : SpatialReference instance
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
Note
----
if self.dis is not None, then the spatial reference instance is updated
using self.dis.delr, self.dis.delc, and self.dis.lenuni before being
returned
"""
if item == "output_packages" or not hasattr(self, "output_packages"):
raise AttributeError(item)
if item == "tr":
if self.dis is not None:
return self.dis.tr
else:
return None
if item == "nper":
if self.dis is not None:
return self.dis.nper
else:
return 0
if item == "start_datetime":
if self.dis is not None:
return self.dis.start_datetime
else:
return None
# return self.get_package(item)
# to avoid infinite recursion
if item == "_packagelist" or item == "packagelist":
raise AttributeError(item)
pckg = self.get_package(item)
if pckg is not None or item in self.mfnam_packages:
return pckg
if item == "modelgrid":
return
raise AttributeError(item)
def get_ext_dict_attr(
self, ext_unit_dict=None, unit=None, filetype=None, pop_key=True
):
iu = None
fname = None
if ext_unit_dict is not None:
for key, value in ext_unit_dict.items():
if key == unit:
iu = key
fname = os.path.basename(value.filename)
break
elif value.filetype == filetype:
iu = key
fname = os.path.basename(value.filename)
if pop_key:
self.add_pop_key_list(iu)
break
return iu, fname
def _output_msg(self, i, add=True):
if add:
txt1 = "Adding"
txt2 = "to"
else:
txt1 = "Removing"
txt2 = "from"
print(
f"{txt1} {self.output_fnames[i]} (unit={self.output_units[i]}) "
f"{txt2} the output list."
)
def add_output_file(
self, unit, fname=None, extension="cbc", binflag=True, package=None
):
"""
Add an ascii or binary output file for a package
Parameters
----------
        unit : int
            unit number of the output file
        fname : str
            filename of the output file. (default is None)
extension : str
extension to use for the cell-by-cell file. Only used if fname
is None. (default is cbc)
binflag : bool
boolean flag indicating if the output file is a binary file.
Default is True
package : str
string that defines the package the output file is attached to.
Default is None
"""
add_cbc = False
if unit > 0:
add_cbc = True
# determine if the file is in external_units
if abs(unit) in self.external_units:
idx = self.external_units.index(abs(unit))
if fname is None:
fname = os.path.basename(self.external_fnames[idx])
binflag = self.external_binflag[idx]
self.remove_external(unit=abs(unit))
# determine if the unit exists in the output data
if abs(unit) in self.output_units:
add_cbc = False
idx = self.output_units.index(abs(unit))
# determine if binflag has changed
if binflag is not self.output_binflag[idx]:
add_cbc = True
if add_cbc:
self.remove_output(unit=abs(unit))
else:
if package is not None:
self.output_packages[idx].append(package)
if add_cbc:
if fname is None:
fname = f"{self.name}.{extension}"
# check if this file name exists for a different unit number
if fname in self.output_fnames:
idx = self.output_fnames.index(fname)
iut = self.output_units[idx]
if iut != unit:
# include unit number in fname if package has
# not been passed
if package is None:
fname = f"{self.name}.{unit}.{extension}"
# include package name in fname
else:
fname = f"{self.name}.{package}.{extension}"
else:
fname = os.path.basename(fname)
self.add_output(fname, unit, binflag=binflag, package=package)
return
def add_output(self, fname, unit, binflag=False, package=None):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.output_fnames:
if self.verbose:
print(
"BaseModel.add_output() warning: "
f"replacing existing filename {fname}"
)
idx = self.output_fnames.index(fname)
if self.verbose:
self._output_msg(idx, add=False)
self.output_fnames.pop(idx)
self.output_units.pop(idx)
self.output_binflag.pop(idx)
self.output_packages.pop(idx)
self.output_fnames.append(fname)
self.output_units.append(unit)
self.output_binflag.append(binflag)
if package is not None:
self.output_packages.append([package])
else:
self.output_packages.append([])
if self.verbose:
self._output_msg(-1, add=True)
return
def remove_output(self, fname=None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
        if fname is not None:
            ipop = [
                i for i, e in enumerate(self.output_fnames) if fname in e
            ]
        elif unit is not None:
            ipop = [i for i, u in enumerate(self.output_units) if u == unit]
        else:
            msg = " either fname or unit must be passed to remove_output()"
            raise Exception(msg)
        # pop in reverse order so the remaining indices stay valid
        for i in sorted(ipop, reverse=True):
            if self.verbose:
                self._output_msg(i, add=False)
            self.output_fnames.pop(i)
            self.output_units.pop(i)
            self.output_binflag.pop(i)
            self.output_packages.pop(i)
        return
def get_output(self, fname=None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
return self.output_units[i]
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
return self.output_fnames[i]
return None
else:
msg = " either fname or unit must be passed to get_output()"
raise Exception(msg)
return
def set_output_attribute(self, fname=None, unit=None, attr=None):
"""
Set a variable in an output file from the model by specifying either
the file name or the unit number and a dictionary with attributes
to change.
Parameters
----------
fname : str
filename of output array
unit : int
            unit number of output array
        attr : dict
            dictionary of attribute name/value pairs to set; recognized
            keys are 'binflag', 'fname', and 'unit'
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
msg = (
" either fname or unit must be passed "
"to set_output_attribute()"
)
raise Exception(msg)
if attr is not None:
if idx is not None:
                for key, value in attr.items():
if key == "binflag":
self.output_binflag[idx] = value
elif key == "fname":
self.output_fnames[idx] = value
elif key == "unit":
self.output_units[idx] = value
return
def get_output_attribute(self, fname=None, unit=None, attr=None):
"""
        Get an attribute for an output file from the model by specifying either
the file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
            unit number of output array
        attr : str
            name of the attribute to return; one of 'binflag', 'fname',
            or 'unit'
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
" either fname or unit must be passed "
"to set_output_attribute()"
)
v = None
if attr is not None:
if idx is not None:
if attr == "binflag":
v = self.output_binflag[idx]
elif attr == "fname":
v = self.output_fnames[idx]
elif attr == "unit":
v = self.output_units[idx]
return v
def add_external(self, fname, unit, binflag=False, output=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.external_fnames:
if self.verbose:
print(
"BaseModel.add_external() warning: "
f"replacing existing filename {fname}"
)
idx = self.external_fnames.index(fname)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
if unit in self.external_units:
if self.verbose:
msg = f"BaseModel.add_external() warning: replacing existing unit {unit}"
print(msg)
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
self.external_output.append(output)
return
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
"""
plist = []
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
plist.append(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
plist.append(i)
else:
msg = " either fname or unit must be passed to remove_external()"
raise Exception(msg)
# remove external file
j = 0
for i in plist:
ipos = i - j
self.external_fnames.pop(ipos)
self.external_units.pop(ipos)
self.external_binflag.pop(ipos)
self.external_output.pop(ipos)
j += 1
return
def add_existing_package(
self, filename, ptype=None, copy_to_model_ws=True
):
"""
Add an existing package to a model instance.
Parameters
----------
filename : str
the name of the file to add as a package
ptype : optional
the model package type (e.g. "lpf", "wel", etc). If None,
then the file extension of the filename arg is used
copy_to_model_ws : bool
flag to copy the package file into the model_ws directory.
Returns
-------
None
"""
if ptype is None:
ptype = filename.split(".")[-1]
ptype = str(ptype).upper()
# for pak in self.packagelist:
# if ptype in pak.name:
# print("BaseModel.add_existing_package() warning: " +\
# "replacing existing package {0}".format(ptype))
class Obj:
pass
fake_package = Obj()
fake_package.write_file = lambda: None
fake_package.name = [ptype]
fake_package.extension = [filename.split(".")[-1]]
fake_package.unit_number = [self.next_ext_unit()]
if copy_to_model_ws:
base_filename = os.path.split(filename)[-1]
fake_package.file_name = [base_filename]
shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
else:
fake_package.file_name = [filename]
fake_package.allowDuplicates = True
self.add_package(fake_package)
def get_name_file_entries(self):
"""
Get a string representation of the name file.
        Returns
        -------
        str
            String representation of the name file.
"""
lines = []
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s = f"{p.name[i]:14s} {p.unit_number[i]:5d} {p.file_name[i]}"
lines.append(s)
return "\n".join(lines) + "\n"
def has_package(self, name):
"""
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
            True if the package name exists, otherwise False.
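        Examples
        --------
        A minimal sketch (assumes the model includes a DIS package):

        >>> m.has_package("DIS")
        True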
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for p in self.packagelist:
for pn in p.name:
if pn.upper() == name:
return True
return False
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
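        Examples
        --------
        A minimal sketch (assumes the model includes an LPF package;
        None is returned if the package is absent):

        >>> lpf = m.get_package("LPF")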
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for pp in self.packagelist:
if pp.name[0].upper() == name:
return pp
return None
def set_version(self, version):
self.version = version.lower()
# check that this is a valid model version
if self.version not in list(self.version_types.keys()):
err = (
f"Error: Unsupported model version ({self.version}). "
"Valid model versions are:"
)
for v in list(self.version_types.keys()):
err += f" {v}"
raise Exception(err)
# set namefile heading
self.heading = (
f"# Name file for {self.version_types[self.version]}, "
f"generated by Flopy version {__version__}."
)
# set heading for each package
for p in self.get_package_list():
pak = self.get_package(p)
if hasattr(pak, "heading"):
pak._generate_heading()
return None
def change_model_ws(self, new_pth=None, reset_external=False):
"""
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
the present working directory).
Returns
-------
        None
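        Examples
        --------
        A minimal sketch (the path is illustrative):

        >>> ml.change_model_ws("./new_workspace")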
"""
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
print(f"\ncreating model workspace...\n {new_pth}")
os.makedirs(new_pth)
            except Exception:
raise OSError(f"{new_pth} not valid, workspace-folder")
# line = '\n{} not valid, workspace-folder '.format(new_pth) + \
# 'was changed to {}\n'.format(os.getcwd())
# print(line)
# new_pth = os.getcwd()
# --reset the model workspace
old_pth = self._model_ws
self._model_ws = new_pth
if self.verbose:
print(f"\nchanging model workspace...\n {new_pth}")
# reset the paths for each package
for pp in self.packagelist:
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
# create the external path (if needed)
if (
hasattr(self, "external_path")
and self.external_path is not None
and not os.path.exists(
os.path.join(self._model_ws, self.external_path)
)
):
pth = os.path.join(self._model_ws, self.external_path)
os.makedirs(pth)
if reset_external:
self._reset_external(pth, old_pth)
elif reset_external:
self._reset_external(self._model_ws, old_pth)
return None
def _reset_external(self, pth, old_pth):
new_ext_fnames = []
for ext_file, output in zip(
self.external_fnames, self.external_output
):
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
# this is a wicked mess
if output:
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
new_ext_file = ext_file
else:
# fpth = os.path.abspath(os.path.join(old_pth, ext_file))
# new_ext_file = os.path.relpath(fpth, os.path.abspath(pth))
fdir = os.path.dirname(ext_file)
if fdir == "":
fpth = os.path.abspath(os.path.join(old_pth, ext_file))
else:
fpth = ext_file
ao = os.path.abspath(os.path.dirname(fpth))
ep = os.path.abspath(pth)
relp = os.path.relpath(ao, ep)
new_ext_file = os.path.join(relp, os.path.basename(ext_file))
new_ext_fnames.append(new_ext_file)
self.external_fnames = new_ext_fnames
@property
def model_ws(self):
return copy.deepcopy(self._model_ws)
def _set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = str(value)
self.namefile = self.__name + "." + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + "." + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
def __setattr__(self, key, value):
if key == "free_format_input":
# if self.bas6 is not None:
# self.bas6.ifrefm = value
super().__setattr__(key, value)
elif key == "name":
self._set_name(value)
elif key == "model_ws":
self.change_model_ws(value)
elif key == "sr" and value.__class__.__name__ == "SpatialReference":
warnings.warn(
"SpatialReference has been deprecated.",
category=DeprecationWarning,
)
if self.dis is not None:
self.dis.sr = value
else:
raise Exception(
"cannot set SpatialReference - ModflowDis not found"
)
elif key == "tr":
assert isinstance(
value, discretization.reference.TemporalReference
)
if self.dis is not None:
self.dis.tr = value
else:
raise Exception(
"cannot set TemporalReference - ModflowDis not found"
)
elif key == "start_datetime":
if self.dis is not None:
self.dis.start_datetime = value
self.tr.start_datetime = value
else:
raise Exception(
"cannot set start_datetime - ModflowDis not found"
)
else:
super().__setattr__(key, value)
def run_model(
self,
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
):
"""
This method will run the model using subprocess.Popen.
Parameters
----------
silent : boolean
            If True, suppress model output to the screen. (default is False)
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
            by the method. (default is False)
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
return run_model(
self.exe_name,
self.namefile,
model_ws=self.model_ws,
silent=silent,
pause=pause,
report=report,
normal_msg=normal_msg,
)
def load_results(self):
print("load_results not implemented")
return None
def write_input(self, SelPackList=False, check=False):
"""
Write the input.
Parameters
----------
        SelPackList : False or list of str
            List of package names to write; if False (default), all
            packages are written.
        check : bool
            Run the model-level check prior to writing input.
            (default is False)
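        Examples
        --------
        A minimal sketch (package names are illustrative):

        >>> ml.write_input()
        >>> ml.write_input(SelPackList=["DIS", "BAS6"])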
"""
if check:
# run check prior to writing input
self.check(f=f"{self.name}.chk", verbose=self.verbose, level=1)
# reset the model to free_format if parameter substitution was
# performed on a model load
if self.parameter_load and not self.free_format_input:
if self.verbose:
print(
"\nResetting free_format_input to True to "
"preserve the precision of the parameter data."
)
self.free_format_input = True
if self.verbose:
print("\nWriting packages:")
if SelPackList == False:
for p in self.packagelist:
if self.verbose:
print(" Package: ", p.name[0])
# prevent individual package checks from running after
# model-level package check above
# otherwise checks are run twice
# or the model level check procedure would have to be split up
# or each package would need a check argument,
# or default for package level check would have to be False
try:
p.write_file(check=False)
except TypeError:
p.write_file()
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(" Package: ", p.name[0])
try:
p.write_file(check=False)
except TypeError:
p.write_file()
break
if self.verbose:
print(" ")
# write name file
self.write_name_file()
# os.chdir(org_dir)
return
def write_name_file(self):
"""
Every Package needs its own writenamefile function
"""
raise Exception(
"IMPLEMENTATION ERROR: writenamefile must be overloaded"
)
def set_model_units(self):
"""
Every model needs its own set_model_units method
"""
raise Exception(
"IMPLEMENTATION ERROR: set_model_units must be overloaded"
)
@property
def name(self):
"""
Get model name
Returns
-------
name : str
name of model
"""
return copy.deepcopy(self.__name)
def add_pop_key_list(self, key):
"""
        Add an external file unit number to a list that will be used to remove
model output (typically binary) files from ext_unit_dict.
Parameters
----------
key : int
file unit number
Returns
-------
Examples
--------
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
        chk : check object
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
chk = utils.check(self, f=f, verbose=verbose, level=level)
# check for unit number conflicts
package_units = {}
duplicate_units = {}
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] != 0:
if p.unit_number[i] in package_units.values():
duplicate_units[p.name[i]] = p.unit_number[i]
otherpackage = [
k
for k, v in package_units.items()
if v == p.unit_number[i]
][0]
duplicate_units[otherpackage] = p.unit_number[i]
if len(duplicate_units) > 0:
for k, v in duplicate_units.items():
chk._add_to_summary(
"Error", package=k, value=v, desc="unit number conflict"
)
else:
chk.passed.append("Unit number conflicts")
return self._check(chk, level)
def plot(self, SelPackList=None, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
model input data
Parameters
----------
        SelPackList : list of str, optional
            List of packages to plot. If SelPackList=None, all packages
are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
                MODFLOW zero-based layer number to return. If None, all
                layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return.
(default is zero)
key : str
MfList dictionary key. (default is None)
Returns
----------
axes : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis are returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_model_helper(
self, SelPackList=SelPackList, **kwargs
)
return axes
def to_shapefile(self, filename, package_names=None, **kwargs):
"""
Wrapper function for writing a shapefile for the model grid. If
package_names is not None, then search through the requested packages
looking for arrays that can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
        >>> m.to_shapefile('model.shp', package_names=['dis', 'lpf'])
"""
warnings.warn("to_shapefile() is deprecated. use .export()")
self.export(filename, package_names=package_names)
return
def run_model(
exe_name,
namefile,
model_ws="./",
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
use_async=False,
cargs=None,
):
"""
This function will run the model using subprocess.Popen. It
communicates with the model's stdout asynchronously and reports
progress to the screen with timestamps
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
Namefile of model to run. The namefile must be the
filename of the namefile without the path. Namefile can be None
to allow programs that do not require a control file (name file)
to be passed as a command line argument.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
silent : boolean
        If True, suppress model output to the screen. (default is False)
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
        by the method. (default is False)
normal_msg : str or list
Normal termination message used to determine if the
run terminated normally. More than one message can be provided using
a list. (Default is 'normal termination')
use_async : boolean
        Asynchronously read model stdout and report progress with
        timestamps. Useful for long-running models; adds overhead for
        models that finish quickly. (default is False)
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
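    Examples
    --------
    A minimal sketch (executable, name file, and workspace are
    illustrative; assumes mf2005 is on the system path):

    >>> success, buff = run_model("mf2005", "model.nam", model_ws="./sim")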
"""
success = False
buff = []
# convert normal_msg to a list of lower case str for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg]
for idx, s in enumerate(normal_msg):
normal_msg[idx] = s.lower()
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
import platform
        if platform.system() == "Windows":
if not exe_name.lower().endswith(".exe"):
exe = which(exe_name + ".exe")
elif exe_name.lower().endswith(".exe"):
exe = which(exe_name[:-4])
if exe is None:
raise Exception(
f"The program {exe_name} does not exist or is not executable."
)
else:
if not silent:
print(
f"FloPy is using the following executable to run the model: {exe}"
)
if namefile is not None:
if not os.path.isfile(os.path.join(model_ws, namefile)):
raise Exception(
f"The namefile for this model does not exists: {namefile}"
)
# simple little function for the thread to target
def q_output(output, q):
for line in iter(output.readline, b""):
q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe_name]
if namefile is not None:
argv.append(namefile)
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
if not use_async:
while True:
line = proc.stdout.readline().decode("utf-8")
if line == "" and proc.poll() is not None:
break
if line:
for msg in normal_msg:
if msg in line.lower():
success = True
break
line = line.rstrip("\r\n")
if not silent:
print(line)
if report:
buff.append(line)
else:
break
return success, buff
# some tricks for the async stdout reading
q = Queue.Queue()
thread = threading.Thread(target=q_output, args=(proc.stdout, q))
thread.daemon = True
thread.start()
failed_words = ["fail", "error"]
last = datetime.now()
lastsec = 0.0
while True:
try:
line = q.get_nowait()
except Queue.Empty:
pass
else:
if line == "":
break
line = line.decode().lower().strip()
if line != "":
now = datetime.now()
dt = now - last
tsecs = dt.total_seconds() - lastsec
line = f"(elapsed:{tsecs})-->{line}"
lastsec = tsecs + lastsec
buff.append(line)
if not silent:
print(line)
for fword in failed_words:
if fword in line:
success = False
break
if proc.poll() is not None:
break
proc.wait()
thread.join(timeout=1)
    buff.extend(
        line.decode("utf-8") for line in proc.stdout.readlines()
    )
proc.stdout.close()
for line in buff:
for msg in normal_msg:
if msg in line.lower():
print("success")
success = True
break
if pause:
input("Press Enter to continue...")
return success, buff
|
add_output
|
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
|
"""
mbase module
This module contains the base model class from which
all of the other models inherit.
"""
import abc
import os
import shutil
import threading
import warnings
import queue as Queue
from datetime import datetime
from shutil import which
from subprocess import Popen, PIPE, STDOUT
import copy
import numpy as np
from flopy import utils, discretization
from .version import __version__
from .discretization.grid import Grid
## Global variables
# Multiplier for individual array elements in integer and real arrays read by
# MODFLOW's U2DREL, U1DREL and U2DINT.
iconst = 1
# Printout flag. If >= 0 then array values read are printed in listing file.
iprn = -1
# external exceptions for users
class PackageLoadException(Exception):
"""
FloPy package load exception.
"""
def __init__(self, error, location=""):
"""Initialize exception."""
self.message = error
super().__init__(f"{error} ({location})")
class FileDataEntry:
def __init__(self, fname, unit, binflag=False, output=False, package=None):
self.fname = fname
self.unit = unit
self.binflag = binflag
self.output = output
self.package = package
class FileData:
def __init__(self):
self.file_data = []
return
def add_file(self, fname, unit, binflag=False, output=False, package=None):
ipop = []
for idx, file_data in enumerate(self.file_data):
if file_data.fname == fname or file_data.unit == unit:
                ipop.append(idx)
        # remove any existing entries that share the file name or unit number
        for idx in sorted(ipop, reverse=True):
            self.file_data.pop(idx)
self.file_data.append(
FileDataEntry(
fname, unit, binflag=binflag, output=output, package=package
)
)
return
class ModelInterface:
def __init__(self):
self._mg_resync = True
self._modelgrid = None
def update_modelgrid(self):
if self._modelgrid is not None:
self._modelgrid = Grid(
proj4=self._modelgrid.proj4,
xoff=self._modelgrid.xoffset,
yoff=self._modelgrid.yoffset,
angrot=self._modelgrid.angrot,
)
self._mg_resync = True
@property
@abc.abstractmethod
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
@abc.abstractmethod
def packagelist(self):
raise NotImplementedError(
"must define packagelist in child class to use this base class"
)
@property
@abc.abstractmethod
def namefile(self):
raise NotImplementedError(
"must define namefile in child class to use this base class"
)
@property
@abc.abstractmethod
def model_ws(self):
raise NotImplementedError(
"must define model_ws in child class to use this base class"
)
@property
@abc.abstractmethod
def exename(self):
raise NotImplementedError(
"must define exename in child class to use this base class"
)
@property
@abc.abstractmethod
def version(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@property
@abc.abstractmethod
def solver_tols(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@abc.abstractmethod
def export(self, f, **kwargs):
raise NotImplementedError(
"must define export in child class to use this base class"
)
@property
@abc.abstractmethod
def laytyp(self):
raise NotImplementedError(
"must define laytyp in child class to use this base class"
)
@property
@abc.abstractmethod
def hdry(self):
raise NotImplementedError(
"must define hdry in child class to use this base class"
)
@property
@abc.abstractmethod
def hnoflo(self):
raise NotImplementedError(
"must define hnoflo in child class to use this base class"
)
@property
@abc.abstractmethod
def laycbd(self):
raise NotImplementedError(
"must define laycbd in child class to use this base class"
)
@property
@abc.abstractmethod
def verbose(self):
raise NotImplementedError(
"must define verbose in child class to use this base class"
)
@abc.abstractmethod
def check(self, f=None, verbose=True, level=1):
raise NotImplementedError(
"must define check in child class to use this base class"
)
def get_package_list(self, ftype=None):
"""
Get a list of all the package names.
Parameters
----------
ftype : str
Type of package, 'RIV', 'LPF', etc.
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
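        Examples
        --------
        A minimal sketch (the returned names depend on the loaded model;
        the values shown are illustrative):

        >>> m.get_package_list()
        ['DIS', 'BAS6', 'LPF', 'WEL']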
"""
val = []
for pp in self.packagelist:
if ftype is None:
val.append(pp.name[0].upper())
elif pp.package_type.lower() == ftype:
val.append(pp.name[0].upper())
return val
def _check(self, chk, level=1):
"""
Check model data for common errors.
Parameters
----------
        chk : check object
            Check instance, already populated with model-level results,
            to which package-level results are appended.
        level : int
            Check method analysis level. If level=0, summary checks are
            performed. If level=1, full checks are performed.
        Returns
        -------
        chk : check object
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
results = {}
for p in self.packagelist:
if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
results[p.name[0]] = p.check(
f=None,
verbose=False,
level=level - 1,
checktype=chk.__class__,
)
# model level checks
# solver check
if self.version in chk.solver_packages.keys():
solvers = set(chk.solver_packages[self.version]).intersection(
set(self.get_package_list())
)
if not solvers:
chk._add_to_summary(
"Error", desc="\r No solver package", package="model"
)
elif len(list(solvers)) > 1:
for s in solvers:
chk._add_to_summary(
"Error",
desc="\r Multiple solver packages",
package=s,
)
else:
chk.passed.append("Compatible solver package")
# add package check results to model level check summary
for r in results.values():
if (
r is not None and r.summary_array is not None
): # currently SFR doesn't have one
chk.summary_array = np.append(
chk.summary_array, r.summary_array
).view(np.recarray)
chk.passed += [
f"{r.package.name[0]} package: {psd}" for psd in r.passed
]
chk.summarize()
return chk
class BaseModel(ModelInterface):
"""
MODFLOW-based models base class.
Parameters
----------
modelname : str, default "modflowtest"
Name of the model, which is also used for model file names.
namefile_ext : str, default "nam"
Name file extension, without "."
exe_name : str, default "mf2k.exe"
Name of the modflow executable.
model_ws : str, optional
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
structured : bool, default True
Specify if model grid is structured (default) or unstructured.
verbose : bool, default False
Print additional information to the screen.
**kwargs : dict, optional
Used to define: ``xll``/``yll`` for the x- and y-coordinates of
the lower-left corner of the grid, ``xul``/``yul`` for the
x- and y-coordinates of the upper-left corner of the grid
(deprecated), ``rotation`` for the grid rotation (default 0.0),
``proj4_str`` for a PROJ string, and ``start_datetime`` for
model start date (default "1-1-1970").
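    Examples
    --------
    BaseModel is not instantiated directly; a minimal sketch using the
    Modflow subclass (names are illustrative):

    >>> import flopy
    >>> ml = flopy.modflow.Modflow(modelname="mymodel", exe_name="mf2005")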
"""
def __init__(
self,
modelname="modflowtest",
namefile_ext="nam",
exe_name="mf2k.exe",
model_ws=None,
structured=True,
verbose=False,
**kwargs,
):
"""Initialize BaseModel."""
super().__init__()
self.__name = modelname
self.namefile_ext = namefile_ext or ""
self._namefile = self.__name + "." + self.namefile_ext
self._packagelist = []
self.heading = ""
self.exe_name = exe_name
self._verbose = verbose
self.external_path = None
self.external_extension = "ref"
if model_ws is None:
model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
            except Exception:
print(
f"\n{model_ws} not valid, "
f"workspace-folder was changed to {os.getcwd()}\n"
)
model_ws = os.getcwd()
self._model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ""
# check for reference info in kwargs
# we are just carrying these until a dis package is added
xll = kwargs.pop("xll", None)
yll = kwargs.pop("yll", None)
self._xul = kwargs.pop("xul", None)
self._yul = kwargs.pop("yul", None)
self._rotation = kwargs.pop("rotation", 0.0)
self._proj4_str = kwargs.pop("proj4_str", None)
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
# build model discretization objects
self._modelgrid = Grid(
proj4=self._proj4_str,
xoff=xll,
yoff=yll,
angrot=self._rotation,
)
self._modeltime = None
# Model file information
self.__onunit__ = 10
# external option stuff
self.array_free_format = True
self.free_format_input = True
self.parameter_load = False
self.array_format = None
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external_output = []
self.package_units = []
self._next_ext_unit = None
# output files
self.output_fnames = []
self.output_units = []
self.output_binflag = []
self.output_packages = []
return
@property
def modeltime(self):
raise NotImplementedError(
"must define modeltime in child class to use this base class"
)
@property
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
def packagelist(self):
return self._packagelist
@packagelist.setter
def packagelist(self, packagelist):
self._packagelist = packagelist
@property
def namefile(self):
return self._namefile
@namefile.setter
def namefile(self, namefile):
self._namefile = namefile
@property
def model_ws(self):
return self._model_ws
@model_ws.setter
def model_ws(self, model_ws):
self._model_ws = model_ws
@property
def exename(self):
return self._exename
@exename.setter
def exename(self, exename):
self._exename = exename
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, verbose):
self._verbose = verbose
@property
def laytyp(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").laytyp.array
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").laycon.array
if self.get_package("UPW") is not None:
return self.get_package("UPW").laytyp.array
return None
@property
def hdry(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").hdry
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").hdry
if self.get_package("UPW") is not None:
return self.get_package("UPW").hdry
return None
@property
def hnoflo(self):
try:
bas6 = self.get_package("BAS6")
return bas6.hnoflo
except AttributeError:
return None
@property
def laycbd(self):
try:
dis = self.get_package("DIS")
return dis.laycbd.array
except AttributeError:
return None
# we don't need these - no need for controlled access to array_free_format
# def set_free_format(self, value=True):
# """
# Set the free format flag for the model instance
#
# Parameters
# ----------
# value : bool
# Boolean value to set free format flag for model. (default is True)
#
# Returns
# -------
#
# """
# if not isinstance(value, bool):
# print('Error: set_free_format passed value must be a boolean')
# return False
# self.array_free_format = value
#
# def get_free_format(self):
# """
# Return the free format flag for the model
#
# Returns
# -------
# out : bool
# Free format flag for the model
#
# """
# return self.array_free_format
def next_unit(self, i=None):
if i is not None:
self.__onunit__ = i - 1
else:
self.__onunit__ += 1
return self.__onunit__
def next_ext_unit(self):
"""
        Return the next available external unit number and increment the
        counter.
"""
next_unit = self._next_ext_unit + 1
self._next_ext_unit += 1
return next_unit
def export(self, f, **kwargs):
"""
Method to export a model to netcdf or shapefile based on the
extension of the file name (.shp for shapefile, .nc for netcdf)
Parameters
----------
f : str
filename
kwargs : keyword arguments
modelgrid : flopy.discretization.Grid instance
user supplied modelgrid which can be used for exporting
in lieu of the modelgrid associated with the model object
Returns
-------
None or Netcdf object
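        Examples
        --------
        A minimal sketch (the output format follows the file extension):

        >>> m.export("model.shp")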
"""
from .export import utils
return utils.model_export(f, self, **kwargs)
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
"""
for idx, u in enumerate(p.unit_number):
if u != 0:
if u in self.package_units or u in self.external_units:
try:
pn = p.name[idx]
                    except Exception:
pn = p.name
if self.verbose:
print(
f"\nWARNING:\n unit {u} of package {pn} already in use."
)
self.package_units.append(u)
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif isinstance(p, type(pp)):
if self.verbose:
print(
"\nWARNING:\n Two packages of the same type, "
f"Replacing existing '{p.name[0]}' package."
)
self.packagelist[i] = p
return
if self.verbose:
print("adding Package: ", p.name[0])
self.packagelist.append(p)
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
"""
for i, pp in enumerate(self.packagelist):
if pname.upper() in pp.name:
if self.verbose:
print("removing Package: ", pp.name)
# Remove the package object from the model's packagelist
p = self.packagelist.pop(i)
# Remove the package unit number from the list of package
# units stored with the model
for iu in p.unit_number:
if iu in self.package_units:
self.package_units.remove(iu)
return
raise StopIteration(
"Package name " + pname + " not found in Package list"
)
def __getattr__(self, item):
"""
__getattr__ - syntactic sugar
Parameters
----------
item : str
3 character package name (case insensitive) or "sr" to access
the SpatialReference instance of the ModflowDis object
Returns
-------
sr : SpatialReference instance
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
Note
----
if self.dis is not None, then the spatial reference instance is updated
using self.dis.delr, self.dis.delc, and self.dis.lenuni before being
returned
"""
if item == "output_packages" or not hasattr(self, "output_packages"):
raise AttributeError(item)
if item == "tr":
if self.dis is not None:
return self.dis.tr
else:
return None
if item == "nper":
if self.dis is not None:
return self.dis.nper
else:
return 0
if item == "start_datetime":
if self.dis is not None:
return self.dis.start_datetime
else:
return None
# return self.get_package(item)
# to avoid infinite recursion
if item == "_packagelist" or item == "packagelist":
raise AttributeError(item)
pckg = self.get_package(item)
if pckg is not None or item in self.mfnam_packages:
return pckg
if item == "modelgrid":
return
raise AttributeError(item)
def get_ext_dict_attr(
self, ext_unit_dict=None, unit=None, filetype=None, pop_key=True
):
iu = None
fname = None
if ext_unit_dict is not None:
for key, value in ext_unit_dict.items():
if key == unit:
iu = key
fname = os.path.basename(value.filename)
break
elif value.filetype == filetype:
iu = key
fname = os.path.basename(value.filename)
if pop_key:
self.add_pop_key_list(iu)
break
return iu, fname
def _output_msg(self, i, add=True):
if add:
txt1 = "Adding"
txt2 = "to"
else:
txt1 = "Removing"
txt2 = "from"
print(
f"{txt1} {self.output_fnames[i]} (unit={self.output_units[i]}) "
f"{txt2} the output list."
)
def add_output_file(
self, unit, fname=None, extension="cbc", binflag=True, package=None
):
"""
        Add an ASCII or binary output file for a package
Parameters
----------
unit : int
unit number of external array
fname : str
filename of external array. (default is None)
extension : str
extension to use for the cell-by-cell file. Only used if fname
is None. (default is cbc)
binflag : bool
boolean flag indicating if the output file is a binary file.
Default is True
package : str
string that defines the package the output file is attached to.
Default is None
"""
add_cbc = False
if unit > 0:
add_cbc = True
# determine if the file is in external_units
if abs(unit) in self.external_units:
idx = self.external_units.index(abs(unit))
if fname is None:
fname = os.path.basename(self.external_fnames[idx])
binflag = self.external_binflag[idx]
self.remove_external(unit=abs(unit))
# determine if the unit exists in the output data
if abs(unit) in self.output_units:
add_cbc = False
idx = self.output_units.index(abs(unit))
# determine if binflag has changed
if binflag is not self.output_binflag[idx]:
add_cbc = True
if add_cbc:
self.remove_output(unit=abs(unit))
else:
if package is not None:
self.output_packages[idx].append(package)
if add_cbc:
if fname is None:
fname = f"{self.name}.{extension}"
# check if this file name exists for a different unit number
if fname in self.output_fnames:
idx = self.output_fnames.index(fname)
iut = self.output_units[idx]
if iut != unit:
# include unit number in fname if package has
# not been passed
if package is None:
fname = f"{self.name}.{unit}.{extension}"
# include package name in fname
else:
fname = f"{self.name}.{package}.{extension}"
else:
fname = os.path.basename(fname)
self.add_output(fname, unit, binflag=binflag, package=package)
return
# MASKED: add_output function (lines 778-819)
def remove_output(self, fname=None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
        if fname is not None:
            ipop = [
                i for i, e in enumerate(self.output_fnames) if fname in e
            ]
        elif unit is not None:
            ipop = [i for i, u in enumerate(self.output_units) if u == unit]
        else:
            msg = " either fname or unit must be passed to remove_output()"
            raise Exception(msg)
        # pop in reverse order so the remaining indices stay valid
        for i in sorted(ipop, reverse=True):
            if self.verbose:
                self._output_msg(i, add=False)
            self.output_fnames.pop(i)
            self.output_units.pop(i)
            self.output_binflag.pop(i)
            self.output_packages.pop(i)
        return
def get_output(self, fname=None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
return self.output_units[i]
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
return self.output_fnames[i]
return None
else:
msg = " either fname or unit must be passed to get_output()"
raise Exception(msg)
return
def set_output_attribute(self, fname=None, unit=None, attr=None):
"""
Set a variable in an output file from the model by specifying either
the file name or the unit number and a dictionary with attributes
to change.
Parameters
----------
fname : str
filename of output array
unit : int
            unit number of output array
        attr : dict
            dictionary of attribute name/value pairs to set; recognized
            keys are 'binflag', 'fname', and 'unit'
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
msg = (
" either fname or unit must be passed "
"to set_output_attribute()"
)
raise Exception(msg)
if attr is not None:
if idx is not None:
                for key, value in attr.items():
if key == "binflag":
self.output_binflag[idx] = value
elif key == "fname":
self.output_fnames[idx] = value
elif key == "unit":
self.output_units[idx] = value
return
def get_output_attribute(self, fname=None, unit=None, attr=None):
"""
        Get an attribute for an output file from the model by specifying either
the file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
            unit number of output array
        attr : str
            name of the attribute to return; one of 'binflag', 'fname',
            or 'unit'
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
" either fname or unit must be passed "
"to set_output_attribute()"
)
v = None
if attr is not None:
if idx is not None:
if attr == "binflag":
v = self.output_binflag[idx]
elif attr == "fname":
v = self.output_fnames[idx]
elif attr == "unit":
v = self.output_units[idx]
return v
def add_external(self, fname, unit, binflag=False, output=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.external_fnames:
if self.verbose:
print(
"BaseModel.add_external() warning: "
f"replacing existing filename {fname}"
)
idx = self.external_fnames.index(fname)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
if unit in self.external_units:
if self.verbose:
msg = f"BaseModel.add_external() warning: replacing existing unit {unit}"
print(msg)
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
self.external_output.append(output)
return
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
"""
plist = []
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
plist.append(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
plist.append(i)
else:
msg = " either fname or unit must be passed to remove_external()"
raise Exception(msg)
# remove external file
j = 0
for i in plist:
ipos = i - j
self.external_fnames.pop(ipos)
self.external_units.pop(ipos)
self.external_binflag.pop(ipos)
self.external_output.pop(ipos)
j += 1
return
def add_existing_package(
self, filename, ptype=None, copy_to_model_ws=True
):
"""
Add an existing package to a model instance.
Parameters
----------
filename : str
the name of the file to add as a package
ptype : optional
the model package type (e.g. "lpf", "wel", etc). If None,
then the file extension of the filename arg is used
copy_to_model_ws : bool
flag to copy the package file into the model_ws directory.
Returns
-------
None
"""
if ptype is None:
ptype = filename.split(".")[-1]
ptype = str(ptype).upper()
# for pak in self.packagelist:
# if ptype in pak.name:
# print("BaseModel.add_existing_package() warning: " +\
# "replacing existing package {0}".format(ptype))
class Obj:
pass
fake_package = Obj()
fake_package.write_file = lambda: None
fake_package.name = [ptype]
fake_package.extension = [filename.split(".")[-1]]
fake_package.unit_number = [self.next_ext_unit()]
if copy_to_model_ws:
base_filename = os.path.split(filename)[-1]
fake_package.file_name = [base_filename]
shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
else:
fake_package.file_name = [filename]
fake_package.allowDuplicates = True
self.add_package(fake_package)
def get_name_file_entries(self):
"""
Get a string representation of the name file.
        Returns
        -------
        str
            String representation of the name file.
"""
lines = []
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s = f"{p.name[i]:14s} {p.unit_number[i]:5d} {p.file_name[i]}"
lines.append(s)
return "\n".join(lines) + "\n"
def has_package(self, name):
"""
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
            True if the package name exists, otherwise False.
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for p in self.packagelist:
for pn in p.name:
if pn.upper() == name:
return True
return False
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for pp in self.packagelist:
if pp.name[0].upper() == name:
return pp
return None
def set_version(self, version):
self.version = version.lower()
# check that this is a valid model version
if self.version not in list(self.version_types.keys()):
err = (
f"Error: Unsupported model version ({self.version}). "
"Valid model versions are:"
)
for v in list(self.version_types.keys()):
err += f" {v}"
raise Exception(err)
# set namefile heading
self.heading = (
f"# Name file for {self.version_types[self.version]}, "
f"generated by Flopy version {__version__}."
)
# set heading for each package
for p in self.get_package_list():
pak = self.get_package(p)
if hasattr(pak, "heading"):
pak._generate_heading()
return None
def change_model_ws(self, new_pth=None, reset_external=False):
"""
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
the present working directory).
Returns
-------
        None
"""
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
print(f"\ncreating model workspace...\n {new_pth}")
os.makedirs(new_pth)
            except Exception:
raise OSError(f"{new_pth} not valid, workspace-folder")
# line = '\n{} not valid, workspace-folder '.format(new_pth) + \
# 'was changed to {}\n'.format(os.getcwd())
# print(line)
# new_pth = os.getcwd()
# --reset the model workspace
old_pth = self._model_ws
self._model_ws = new_pth
if self.verbose:
print(f"\nchanging model workspace...\n {new_pth}")
# reset the paths for each package
for pp in self.packagelist:
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
# create the external path (if needed)
if (
hasattr(self, "external_path")
and self.external_path is not None
and not os.path.exists(
os.path.join(self._model_ws, self.external_path)
)
):
pth = os.path.join(self._model_ws, self.external_path)
os.makedirs(pth)
if reset_external:
self._reset_external(pth, old_pth)
elif reset_external:
self._reset_external(self._model_ws, old_pth)
return None
def _reset_external(self, pth, old_pth):
new_ext_fnames = []
for ext_file, output in zip(
self.external_fnames, self.external_output
):
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
# this is a wicked mess
if output:
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
new_ext_file = ext_file
else:
# fpth = os.path.abspath(os.path.join(old_pth, ext_file))
# new_ext_file = os.path.relpath(fpth, os.path.abspath(pth))
fdir = os.path.dirname(ext_file)
if fdir == "":
fpth = os.path.abspath(os.path.join(old_pth, ext_file))
else:
fpth = ext_file
ao = os.path.abspath(os.path.dirname(fpth))
ep = os.path.abspath(pth)
relp = os.path.relpath(ao, ep)
new_ext_file = os.path.join(relp, os.path.basename(ext_file))
new_ext_fnames.append(new_ext_file)
self.external_fnames = new_ext_fnames
@property
def model_ws(self):
return copy.deepcopy(self._model_ws)
def _set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = str(value)
self.namefile = self.__name + "." + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + "." + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
def __setattr__(self, key, value):
if key == "free_format_input":
# if self.bas6 is not None:
# self.bas6.ifrefm = value
super().__setattr__(key, value)
elif key == "name":
self._set_name(value)
elif key == "model_ws":
self.change_model_ws(value)
elif key == "sr" and value.__class__.__name__ == "SpatialReference":
warnings.warn(
"SpatialReference has been deprecated.",
category=DeprecationWarning,
)
if self.dis is not None:
self.dis.sr = value
else:
raise Exception(
"cannot set SpatialReference - ModflowDis not found"
)
elif key == "tr":
assert isinstance(
value, discretization.reference.TemporalReference
)
if self.dis is not None:
self.dis.tr = value
else:
raise Exception(
"cannot set TemporalReference - ModflowDis not found"
)
elif key == "start_datetime":
if self.dis is not None:
self.dis.start_datetime = value
self.tr.start_datetime = value
else:
raise Exception(
"cannot set start_datetime - ModflowDis not found"
)
else:
super().__setattr__(key, value)
def run_model(
self,
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
):
"""
This method will run the model using subprocess.Popen.
Parameters
----------
silent : boolean
            If True, suppress model output to the screen. (default is False)
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
            by the method. (default is False)
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
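        Examples
        --------
        A minimal sketch (assumes the model input files have been written
        and the executable is available):

        >>> success, buff = ml.run_model(silent=True, report=True)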
"""
return run_model(
self.exe_name,
self.namefile,
model_ws=self.model_ws,
silent=silent,
pause=pause,
report=report,
normal_msg=normal_msg,
)
def load_results(self):
print("load_results not implemented")
return None
def write_input(self, SelPackList=False, check=False):
"""
Write the input.
Parameters
----------
        SelPackList : False or list of str
            List of package names to write; if False (default), all
            packages are written.
        check : bool
            Run the model-level check prior to writing input.
            (default is False)
"""
if check:
# run check prior to writing input
self.check(f=f"{self.name}.chk", verbose=self.verbose, level=1)
# reset the model to free_format if parameter substitution was
# performed on a model load
if self.parameter_load and not self.free_format_input:
if self.verbose:
print(
"\nResetting free_format_input to True to "
"preserve the precision of the parameter data."
)
self.free_format_input = True
if self.verbose:
print("\nWriting packages:")
if SelPackList == False:
for p in self.packagelist:
if self.verbose:
print(" Package: ", p.name[0])
# prevent individual package checks from running after
# model-level package check above
# otherwise checks are run twice
# or the model level check procedure would have to be split up
# or each package would need a check argument,
# or default for package level check would have to be False
try:
p.write_file(check=False)
except TypeError:
p.write_file()
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(" Package: ", p.name[0])
try:
p.write_file(check=False)
except TypeError:
p.write_file()
break
if self.verbose:
print(" ")
# write name file
self.write_name_file()
# os.chdir(org_dir)
return
def write_name_file(self):
"""
Every Package needs its own writenamefile function
"""
raise Exception(
"IMPLEMENTATION ERROR: writenamefile must be overloaded"
)
def set_model_units(self):
"""
Every model needs its own set_model_units method
"""
raise Exception(
"IMPLEMENTATION ERROR: set_model_units must be overloaded"
)
@property
def name(self):
"""
Get model name
Returns
-------
name : str
name of model
"""
return copy.deepcopy(self.__name)
def add_pop_key_list(self, key):
"""
        Add an external file unit number to a list that will be used to remove
model output (typically binary) files from ext_unit_dict.
Parameters
----------
key : int
file unit number
Returns
-------
Examples
--------
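        A minimal sketch (the unit number is illustrative):

        >>> ml.add_pop_key_list(53)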
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
        chk : check object
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
chk = utils.check(self, f=f, verbose=verbose, level=level)
# check for unit number conflicts
package_units = {}
duplicate_units = {}
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] != 0:
if p.unit_number[i] in package_units.values():
duplicate_units[p.name[i]] = p.unit_number[i]
otherpackage = [
k
for k, v in package_units.items()
if v == p.unit_number[i]
][0]
duplicate_units[otherpackage] = p.unit_number[i]
# record this unit so subsequent packages are checked against it
package_units[p.name[i]] = p.unit_number[i]
if len(duplicate_units) > 0:
for k, v in duplicate_units.items():
chk._add_to_summary(
"Error", package=k, value=v, desc="unit number conflict"
)
else:
chk.passed.append("Unit number conflicts")
return self._check(chk, level)
def plot(self, SelPackList=None, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
model input data
Parameters
----------
SelPackList : None or list of str
List of packages to plot. If SelPackList=None all packages
are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
MODFLOW zero-based layer number to return. If None, then all
layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return.
(default is zero)
key : str
MfList dictionary key. (default is None)
Returns
-------
axes : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis are returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_model_helper(
self, SelPackList=SelPackList, **kwargs
)
return axes
def to_shapefile(self, filename, package_names=None, **kwargs):
"""
Wrapper function for writing a shapefile for the model grid. If
package_names is not None, then search through the requested packages
looking for arrays that can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> m.to_shapefile('model.shp', package_names=['dis', 'lpf'])
"""
warnings.warn("to_shapefile() is deprecated. use .export()")
self.export(filename, package_names=package_names)
return
def run_model(
exe_name,
namefile,
model_ws="./",
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
use_async=False,
cargs=None,
):
"""
This function will run the model using subprocess.Popen. It
communicates with the model's stdout asynchronously and reports
progress to the screen with timestamps
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
Namefile of model to run. The namefile must be the
filename of the namefile without the path. Namefile can be None
to allow programs that do not require a control file (name file)
to be passed as a command line argument.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
silent : boolean
Suppress echo of run information to the screen. (default is False)
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
by the method. (default is False).
normal_msg : str or list
Normal termination message used to determine if the
run terminated normally. More than one message can be provided using
a list. (Default is 'normal termination')
use_async : boolean
Asynchronously read model stdout and report with timestamps. Useful
for models that take a long time to run; not recommended for models
that run very quickly.
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
success = False
buff = []
# convert normal_msg to a list of lower case str for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg]
normal_msg = [s.lower() for s in normal_msg]
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
import platform
if platform.system() == "Windows":
if not exe_name.lower().endswith(".exe"):
exe = which(exe_name + ".exe")
elif exe_name.lower().endswith(".exe"):
exe = which(exe_name[:-4])
if exe is None:
raise Exception(
f"The program {exe_name} does not exist or is not executable."
)
else:
if not silent:
print(
f"FloPy is using the following executable to run the model: {exe}"
)
if namefile is not None:
if not os.path.isfile(os.path.join(model_ws, namefile)):
raise Exception(
f"The namefile for this model does not exists: {namefile}"
)
# simple little function for the thread to target
def q_output(output, q):
for line in iter(output.readline, b""):
q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe_name]
if namefile is not None:
argv.append(namefile)
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
if not use_async:
while True:
line = proc.stdout.readline().decode("utf-8")
if line == "" and proc.poll() is not None:
break
if line:
for msg in normal_msg:
if msg in line.lower():
success = True
break
line = line.rstrip("\r\n")
if not silent:
print(line)
if report:
buff.append(line)
else:
break
return success, buff
# some tricks for the async stdout reading
q = Queue.Queue()
thread = threading.Thread(target=q_output, args=(proc.stdout, q))
thread.daemon = True
thread.start()
failed_words = ["fail", "error"]
last = datetime.now()
lastsec = 0.0
while True:
try:
line = q.get_nowait()
except Queue.Empty:
pass
else:
if line == "":
break
line = line.decode().lower().strip()
if line != "":
now = datetime.now()
dt = now - last
tsecs = dt.total_seconds() - lastsec
line = f"(elapsed:{tsecs})-->{line}"
lastsec = tsecs + lastsec
buff.append(line)
if not silent:
print(line)
for fword in failed_words:
if fword in line:
success = False
break
if proc.poll() is not None:
break
proc.wait()
thread.join(timeout=1)
buff.extend(proc.stdout.readlines())
proc.stdout.close()
for line in buff:
for msg in normal_msg:
if msg in line.lower():
print("success")
success = True
break
if pause:
input("Press Enter to continue...")
return success, buff
|
def add_output(self, fname, unit, binflag=False, package=None):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.output_fnames:
if self.verbose:
print(
"BaseModel.add_output() warning: "
f"replacing existing filename {fname}"
)
idx = self.output_fnames.index(fname)
if self.verbose:
self._output_msg(idx, add=False)
self.output_fnames.pop(idx)
self.output_units.pop(idx)
self.output_binflag.pop(idx)
self.output_packages.pop(idx)
self.output_fnames.append(fname)
self.output_units.append(unit)
self.output_binflag.append(binflag)
if package is not None:
self.output_packages.append([package])
else:
self.output_packages.append([])
if self.verbose:
self._output_msg(-1, add=True)
return
| 778
| 819
|
"""
mbase module
This module contains the base model class from which
all of the other models inherit.
"""
import abc
import os
import shutil
import threading
import warnings
import queue as Queue
from datetime import datetime
from shutil import which
from subprocess import Popen, PIPE, STDOUT
import copy
import numpy as np
from flopy import utils, discretization
from .version import __version__
from .discretization.grid import Grid
## Global variables
# Multiplier for individual array elements in integer and real arrays read by
# MODFLOW's U2DREL, U1DREL and U2DINT.
iconst = 1
# Printout flag. If >= 0 then array values read are printed in listing file.
iprn = -1
# external exceptions for users
class PackageLoadException(Exception):
"""
FloPy package load exception.
"""
def __init__(self, error, location=""):
"""Initialize exception."""
self.message = error
super().__init__(f"{error} ({location})")
class FileDataEntry:
def __init__(self, fname, unit, binflag=False, output=False, package=None):
self.fname = fname
self.unit = unit
self.binflag = binflag
self.output = output
self.package = package
class FileData:
def __init__(self):
self.file_data = []
return
def add_file(self, fname, unit, binflag=False, output=False, package=None):
ipop = []
for idx, file_data in enumerate(self.file_data):
if file_data.fname == fname or file_data.unit == unit:
ipop.append(idx)
# drop existing entries that clash on filename or unit number
for idx in reversed(ipop):
self.file_data.pop(idx)
self.file_data.append(
FileDataEntry(
fname, unit, binflag=binflag, output=output, package=package
)
)
return
class ModelInterface:
def __init__(self):
self._mg_resync = True
self._modelgrid = None
def update_modelgrid(self):
if self._modelgrid is not None:
self._modelgrid = Grid(
proj4=self._modelgrid.proj4,
xoff=self._modelgrid.xoffset,
yoff=self._modelgrid.yoffset,
angrot=self._modelgrid.angrot,
)
self._mg_resync = True
@property
@abc.abstractmethod
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
@abc.abstractmethod
def packagelist(self):
raise NotImplementedError(
"must define packagelist in child class to use this base class"
)
@property
@abc.abstractmethod
def namefile(self):
raise NotImplementedError(
"must define namefile in child class to use this base class"
)
@property
@abc.abstractmethod
def model_ws(self):
raise NotImplementedError(
"must define model_ws in child class to use this base class"
)
@property
@abc.abstractmethod
def exename(self):
raise NotImplementedError(
"must define exename in child class to use this base class"
)
@property
@abc.abstractmethod
def version(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@property
@abc.abstractmethod
def solver_tols(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@abc.abstractmethod
def export(self, f, **kwargs):
raise NotImplementedError(
"must define export in child class to use this base class"
)
@property
@abc.abstractmethod
def laytyp(self):
raise NotImplementedError(
"must define laytyp in child class to use this base class"
)
@property
@abc.abstractmethod
def hdry(self):
raise NotImplementedError(
"must define hdry in child class to use this base class"
)
@property
@abc.abstractmethod
def hnoflo(self):
raise NotImplementedError(
"must define hnoflo in child class to use this base class"
)
@property
@abc.abstractmethod
def laycbd(self):
raise NotImplementedError(
"must define laycbd in child class to use this base class"
)
@property
@abc.abstractmethod
def verbose(self):
raise NotImplementedError(
"must define verbose in child class to use this base class"
)
@abc.abstractmethod
def check(self, f=None, verbose=True, level=1):
raise NotImplementedError(
"must define check in child class to use this base class"
)
def get_package_list(self, ftype=None):
"""
Get a list of all the package names.
Parameters
----------
ftype : str
Type of package, 'RIV', 'LPF', etc.
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
"""
val = []
for pp in self.packagelist:
if ftype is None:
val.append(pp.name[0].upper())
elif pp.package_type.lower() == ftype:
val.append(pp.name[0].upper())
return val
def _check(self, chk, level=1):
"""
Check model data for common errors.
Parameters
----------
chk : check object
Model-level check instance, used to accumulate package-level
results.
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
results = {}
for p in self.packagelist:
if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
results[p.name[0]] = p.check(
f=None,
verbose=False,
level=level - 1,
checktype=chk.__class__,
)
# model level checks
# solver check
if self.version in chk.solver_packages.keys():
solvers = set(chk.solver_packages[self.version]).intersection(
set(self.get_package_list())
)
if not solvers:
chk._add_to_summary(
"Error", desc="\r No solver package", package="model"
)
elif len(list(solvers)) > 1:
for s in solvers:
chk._add_to_summary(
"Error",
desc="\r Multiple solver packages",
package=s,
)
else:
chk.passed.append("Compatible solver package")
# add package check results to model level check summary
for r in results.values():
if (
r is not None and r.summary_array is not None
): # currently SFR doesn't have one
chk.summary_array = np.append(
chk.summary_array, r.summary_array
).view(np.recarray)
chk.passed += [
f"{r.package.name[0]} package: {psd}" for psd in r.passed
]
chk.summarize()
return chk
class BaseModel(ModelInterface):
"""
MODFLOW-based models base class.
Parameters
----------
modelname : str, default "modflowtest"
Name of the model, which is also used for model file names.
namefile_ext : str, default "nam"
Name file extension, without "."
exe_name : str, default "mf2k.exe"
Name of the modflow executable.
model_ws : str, optional
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
structured : bool, default True
Specify if model grid is structured (default) or unstructured.
verbose : bool, default False
Print additional information to the screen.
**kwargs : dict, optional
Used to define: ``xll``/``yll`` for the x- and y-coordinates of
the lower-left corner of the grid, ``xul``/``yul`` for the
x- and y-coordinates of the upper-left corner of the grid
(deprecated), ``rotation`` for the grid rotation (default 0.0),
``proj4_str`` for a PROJ string, and ``start_datetime`` for
model start date (default "1-1-1970").
"""
def __init__(
self,
modelname="modflowtest",
namefile_ext="nam",
exe_name="mf2k.exe",
model_ws=None,
structured=True,
verbose=False,
**kwargs,
):
"""Initialize BaseModel."""
super().__init__()
self.__name = modelname
self.namefile_ext = namefile_ext or ""
self._namefile = self.__name + "." + self.namefile_ext
self._packagelist = []
self.heading = ""
self.exe_name = exe_name
self._verbose = verbose
self.external_path = None
self.external_extension = "ref"
if model_ws is None:
model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
except OSError:
print(
f"\n{model_ws} not valid, "
f"workspace-folder was changed to {os.getcwd()}\n"
)
model_ws = os.getcwd()
self._model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ""
# check for reference info in kwargs
# we are just carrying these until a dis package is added
xll = kwargs.pop("xll", None)
yll = kwargs.pop("yll", None)
self._xul = kwargs.pop("xul", None)
self._yul = kwargs.pop("yul", None)
self._rotation = kwargs.pop("rotation", 0.0)
self._proj4_str = kwargs.pop("proj4_str", None)
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
# build model discretization objects
self._modelgrid = Grid(
proj4=self._proj4_str,
xoff=xll,
yoff=yll,
angrot=self._rotation,
)
self._modeltime = None
# Model file information
self.__onunit__ = 10
# external option stuff
self.array_free_format = True
self.free_format_input = True
self.parameter_load = False
self.array_format = None
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external_output = []
self.package_units = []
self._next_ext_unit = None
# output files
self.output_fnames = []
self.output_units = []
self.output_binflag = []
self.output_packages = []
return
@property
def modeltime(self):
raise NotImplementedError(
"must define modeltime in child class to use this base class"
)
@property
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
def packagelist(self):
return self._packagelist
@packagelist.setter
def packagelist(self, packagelist):
self._packagelist = packagelist
@property
def namefile(self):
return self._namefile
@namefile.setter
def namefile(self, namefile):
self._namefile = namefile
@property
def model_ws(self):
return self._model_ws
@model_ws.setter
def model_ws(self, model_ws):
self._model_ws = model_ws
@property
def exename(self):
return self._exename
@exename.setter
def exename(self, exename):
self._exename = exename
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, verbose):
self._verbose = verbose
@property
def laytyp(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").laytyp.array
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").laycon.array
if self.get_package("UPW") is not None:
return self.get_package("UPW").laytyp.array
return None
@property
def hdry(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").hdry
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").hdry
if self.get_package("UPW") is not None:
return self.get_package("UPW").hdry
return None
@property
def hnoflo(self):
try:
bas6 = self.get_package("BAS6")
return bas6.hnoflo
except AttributeError:
return None
@property
def laycbd(self):
try:
dis = self.get_package("DIS")
return dis.laycbd.array
except AttributeError:
return None
# we don't need these - no need for controlled access to array_free_format
# def set_free_format(self, value=True):
# """
# Set the free format flag for the model instance
#
# Parameters
# ----------
# value : bool
# Boolean value to set free format flag for model. (default is True)
#
# Returns
# -------
#
# """
# if not isinstance(value, bool):
# print('Error: set_free_format passed value must be a boolean')
# return False
# self.array_free_format = value
#
# def get_free_format(self):
# """
# Return the free format flag for the model
#
# Returns
# -------
# out : bool
# Free format flag for the model
#
# """
# return self.array_free_format
def next_unit(self, i=None):
if i is not None:
self.__onunit__ = i - 1
else:
self.__onunit__ += 1
return self.__onunit__
def next_ext_unit(self):
"""
Function to encapsulate next_ext_unit attribute
"""
next_unit = self._next_ext_unit + 1
self._next_ext_unit += 1
return next_unit
def export(self, f, **kwargs):
"""
Method to export a model to netcdf or shapefile based on the
extension of the file name (.shp for shapefile, .nc for netcdf)
Parameters
----------
f : str
filename
kwargs : keyword arguments
modelgrid : flopy.discretization.Grid instance
user supplied modelgrid which can be used for exporting
in lieu of the modelgrid associated with the model object
Returns
-------
None or Netcdf object
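Examples
--------
A minimal sketch; the export type is inferred from the file extension:
>>> ml.export('model.shp')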
"""
from .export import utils
return utils.model_export(f, self, **kwargs)
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
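Examples
--------
A sketch; package constructors normally call add_package themselves
when given the parent model:
>>> import flopy
>>> ml = flopy.modflow.Modflow()
>>> dis = flopy.modflow.ModflowDis(ml)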
"""
for idx, u in enumerate(p.unit_number):
if u != 0:
if u in self.package_units or u in self.external_units:
try:
pn = p.name[idx]
except (IndexError, TypeError):
pn = p.name
if self.verbose:
print(
f"\nWARNING:\n unit {u} of package {pn} already in use."
)
self.package_units.append(u)
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif isinstance(p, type(pp)):
if self.verbose:
print(
"\nWARNING:\n Two packages of the same type, "
f"Replacing existing '{p.name[0]}' package."
)
self.packagelist[i] = p
return
if self.verbose:
print("adding Package: ", p.name[0])
self.packagelist.append(p)
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
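Examples
--------
A sketch, assuming the model currently has a RIV package:
>>> ml.remove_package('RIV')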
"""
for i, pp in enumerate(self.packagelist):
if pname.upper() in pp.name:
if self.verbose:
print("removing Package: ", pp.name)
# Remove the package object from the model's packagelist
p = self.packagelist.pop(i)
# Remove the package unit number from the list of package
# units stored with the model
for iu in p.unit_number:
if iu in self.package_units:
self.package_units.remove(iu)
return
raise StopIteration(
"Package name " + pname + " not found in Package list"
)
def __getattr__(self, item):
"""
__getattr__ - syntactic sugar
Parameters
----------
item : str
3 character package name (case insensitive) or "sr" to access
the SpatialReference instance of the ModflowDis object
Returns
-------
sr : SpatialReference instance
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
Note
----
if self.dis is not None, then the spatial reference instance is updated
using self.dis.delr, self.dis.delc, and self.dis.lenuni before being
returned
"""
if item == "output_packages" or not hasattr(self, "output_packages"):
raise AttributeError(item)
if item == "tr":
if self.dis is not None:
return self.dis.tr
else:
return None
if item == "nper":
if self.dis is not None:
return self.dis.nper
else:
return 0
if item == "start_datetime":
if self.dis is not None:
return self.dis.start_datetime
else:
return None
# return self.get_package(item)
# to avoid infinite recursion
if item == "_packagelist" or item == "packagelist":
raise AttributeError(item)
pckg = self.get_package(item)
if pckg is not None or item in self.mfnam_packages:
return pckg
if item == "modelgrid":
return
raise AttributeError(item)
def get_ext_dict_attr(
self, ext_unit_dict=None, unit=None, filetype=None, pop_key=True
):
iu = None
fname = None
if ext_unit_dict is not None:
for key, value in ext_unit_dict.items():
if key == unit:
iu = key
fname = os.path.basename(value.filename)
break
elif value.filetype == filetype:
iu = key
fname = os.path.basename(value.filename)
if pop_key:
self.add_pop_key_list(iu)
break
return iu, fname
def _output_msg(self, i, add=True):
if add:
txt1 = "Adding"
txt2 = "to"
else:
txt1 = "Removing"
txt2 = "from"
print(
f"{txt1} {self.output_fnames[i]} (unit={self.output_units[i]}) "
f"{txt2} the output list."
)
def add_output_file(
self, unit, fname=None, extension="cbc", binflag=True, package=None
):
"""
Add an ascii or binary output file for a package
Parameters
----------
unit : int
unit number of external array
fname : str
filename of external array. (default is None)
extension : str
extension to use for the cell-by-cell file. Only used if fname
is None. (default is cbc)
binflag : bool
boolean flag indicating if the output file is a binary file.
Default is True
package : str
string that defines the package the output file is attached to.
Default is None
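Examples
--------
A minimal sketch that attaches a binary cell-by-cell budget file on
unit 53 (the unit number is illustrative):
>>> ml.add_output_file(53, extension='cbc', binflag=True)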
"""
add_cbc = False
if unit > 0:
add_cbc = True
# determine if the file is in external_units
if abs(unit) in self.external_units:
idx = self.external_units.index(abs(unit))
if fname is None:
fname = os.path.basename(self.external_fnames[idx])
binflag = self.external_binflag[idx]
self.remove_external(unit=abs(unit))
# determine if the unit exists in the output data
if abs(unit) in self.output_units:
add_cbc = False
idx = self.output_units.index(abs(unit))
# determine if binflag has changed
if binflag is not self.output_binflag[idx]:
add_cbc = True
if add_cbc:
self.remove_output(unit=abs(unit))
else:
if package is not None:
self.output_packages[idx].append(package)
if add_cbc:
if fname is None:
fname = f"{self.name}.{extension}"
# check if this file name exists for a different unit number
if fname in self.output_fnames:
idx = self.output_fnames.index(fname)
iut = self.output_units[idx]
if iut != unit:
# include unit number in fname if package has
# not been passed
if package is None:
fname = f"{self.name}.{unit}.{extension}"
# include package name in fname
else:
fname = f"{self.name}.{package}.{extension}"
else:
fname = os.path.basename(fname)
self.add_output(fname, unit, binflag=binflag, package=package)
return
def add_output(self, fname, unit, binflag=False, package=None):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.output_fnames:
if self.verbose:
print(
"BaseModel.add_output() warning: "
f"replacing existing filename {fname}"
)
idx = self.output_fnames.index(fname)
if self.verbose:
self._output_msg(idx, add=False)
self.output_fnames.pop(idx)
self.output_units.pop(idx)
self.output_binflag.pop(idx)
self.output_packages.pop(idx)
self.output_fnames.append(fname)
self.output_units.append(unit)
self.output_binflag.append(binflag)
if package is not None:
self.output_packages.append([package])
else:
self.output_packages.append([])
if self.verbose:
self._output_msg(-1, add=True)
return
def remove_output(self, fname=None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
else:
msg = " either fname or unit must be passed to remove_output()"
raise Exception(msg)
return
def get_output(self, fname=None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
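Examples
--------
A sketch that looks up the unit number attached to an output file
(the filename is illustrative):
>>> unit = ml.get_output(fname='model.hds')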
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
return self.output_units[i]
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
return self.output_fnames[i]
return None
else:
msg = " either fname or unit must be passed to get_output()"
raise Exception(msg)
return
def set_output_attribute(self, fname=None, unit=None, attr=None):
"""
Set a variable in an output file from the model by specifying either
the file name or the unit number and a dictionary with attributes
to change.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
msg = (
" either fname or unit must be passed "
"to set_output_attribute()"
)
raise Exception(msg)
if attr is not None:
if idx is not None:
for key, value in attr.items():
if key == "binflag":
self.output_binflag[idx] = value
elif key == "fname":
self.output_fnames[idx] = value
elif key == "unit":
self.output_units[idx] = value
return
def get_output_attribute(self, fname=None, unit=None, attr=None):
"""
Get an attribute for an output file from the model by specifying either
the file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
" either fname or unit must be passed "
"to set_output_attribute()"
)
v = None
if attr is not None:
if idx is not None:
if attr == "binflag":
v = self.output_binflag[idx]
elif attr == "fname":
v = self.output_fnames[idx]
elif attr == "unit":
v = self.output_units[idx]
return v
def add_external(self, fname, unit, binflag=False, output=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
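Examples
--------
A minimal sketch, assuming 'recharge.ref' is an ASCII array file that
should be listed in the name file on unit 150 (both illustrative):
>>> ml.add_external('recharge.ref', 150)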
"""
if fname in self.external_fnames:
if self.verbose:
print(
"BaseModel.add_external() warning: "
f"replacing existing filename {fname}"
)
idx = self.external_fnames.index(fname)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
if unit in self.external_units:
if self.verbose:
msg = f"BaseModel.add_external() warning: replacing existing unit {unit}"
print(msg)
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
self.external_output.append(output)
return
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
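Examples
--------
A sketch, removing a previously added external file by unit number
(the unit is illustrative):
>>> ml.remove_external(unit=150)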
"""
plist = []
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
plist.append(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
plist.append(i)
else:
msg = " either fname or unit must be passed to remove_external()"
raise Exception(msg)
# remove external file
j = 0
for i in plist:
ipos = i - j
self.external_fnames.pop(ipos)
self.external_units.pop(ipos)
self.external_binflag.pop(ipos)
self.external_output.pop(ipos)
j += 1
return
def add_existing_package(
self, filename, ptype=None, copy_to_model_ws=True
):
"""
Add an existing package to a model instance.
Parameters
----------
filename : str
the name of the file to add as a package
ptype : optional
the model package type (e.g. "lpf", "wel", etc). If None,
then the file extension of the filename arg is used
copy_to_model_ws : bool
flag to copy the package file into the model_ws directory.
Returns
-------
None
"""
if ptype is None:
ptype = filename.split(".")[-1]
ptype = str(ptype).upper()
# for pak in self.packagelist:
# if ptype in pak.name:
# print("BaseModel.add_existing_package() warning: " +\
# "replacing existing package {0}".format(ptype))
class Obj:
pass
fake_package = Obj()
fake_package.write_file = lambda: None
fake_package.name = [ptype]
fake_package.extension = [filename.split(".")[-1]]
fake_package.unit_number = [self.next_ext_unit()]
if copy_to_model_ws:
base_filename = os.path.split(filename)[-1]
fake_package.file_name = [base_filename]
shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
else:
fake_package.file_name = [filename]
fake_package.allowDuplicates = True
self.add_package(fake_package)
def get_name_file_entries(self):
"""
Get a string representation of the name file.
Parameters
----------
"""
lines = []
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s = f"{p.name[i]:14s} {p.unit_number[i]:5d} {p.file_name[i]}"
lines.append(s)
return "\n".join(lines) + "\n"
def has_package(self, name):
"""
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
True if package name exists, otherwise False if not found.
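Examples
--------
A sketch, assuming a DIS package has been added to the model:
>>> ml.has_package('DIS')
True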
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for p in self.packagelist:
for pn in p.name:
if pn.upper() == name:
return True
return False
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
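Examples
--------
A sketch, assuming the model has an LPF package:
>>> lpf = ml.get_package('LPF')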
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for pp in self.packagelist:
if pp.name[0].upper() == name:
return pp
return None
def set_version(self, version):
self.version = version.lower()
# check that this is a valid model version
if self.version not in list(self.version_types.keys()):
err = (
f"Error: Unsupported model version ({self.version}). "
"Valid model versions are:"
)
for v in list(self.version_types.keys()):
err += f" {v}"
raise Exception(err)
# set namefile heading
self.heading = (
f"# Name file for {self.version_types[self.version]}, "
f"generated by Flopy version {__version__}."
)
# set heading for each package
for p in self.get_package_list():
pak = self.get_package(p)
if hasattr(pak, "heading"):
pak._generate_heading()
return None
def change_model_ws(self, new_pth=None, reset_external=False):
"""
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
the present working directory).
Returns
-------
None
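Examples
--------
A minimal sketch; the directory is created if it does not exist:
>>> ml.change_model_ws('new_ws')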
"""
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
print(f"\ncreating model workspace...\n {new_pth}")
os.makedirs(new_pth)
except:
raise OSError(f"{new_pth} not valid, workspace-folder")
# line = '\n{} not valid, workspace-folder '.format(new_pth) + \
# 'was changed to {}\n'.format(os.getcwd())
# print(line)
# new_pth = os.getcwd()
# --reset the model workspace
old_pth = self._model_ws
self._model_ws = new_pth
if self.verbose:
print(f"\nchanging model workspace...\n {new_pth}")
# reset the paths for each package
for pp in self.packagelist:
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
# create the external path (if needed)
if (
hasattr(self, "external_path")
and self.external_path is not None
and not os.path.exists(
os.path.join(self._model_ws, self.external_path)
)
):
pth = os.path.join(self._model_ws, self.external_path)
os.makedirs(pth)
if reset_external:
self._reset_external(pth, old_pth)
elif reset_external:
self._reset_external(self._model_ws, old_pth)
return None
def _reset_external(self, pth, old_pth):
new_ext_fnames = []
for ext_file, output in zip(
self.external_fnames, self.external_output
):
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
# this is a wicked mess
if output:
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
new_ext_file = ext_file
else:
# fpth = os.path.abspath(os.path.join(old_pth, ext_file))
# new_ext_file = os.path.relpath(fpth, os.path.abspath(pth))
fdir = os.path.dirname(ext_file)
if fdir == "":
fpth = os.path.abspath(os.path.join(old_pth, ext_file))
else:
fpth = ext_file
ao = os.path.abspath(os.path.dirname(fpth))
ep = os.path.abspath(pth)
relp = os.path.relpath(ao, ep)
new_ext_file = os.path.join(relp, os.path.basename(ext_file))
new_ext_fnames.append(new_ext_file)
self.external_fnames = new_ext_fnames
@property
def model_ws(self):
return copy.deepcopy(self._model_ws)
def _set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = str(value)
self.namefile = self.__name + "." + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + "." + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
def __setattr__(self, key, value):
if key == "free_format_input":
# if self.bas6 is not None:
# self.bas6.ifrefm = value
super().__setattr__(key, value)
elif key == "name":
self._set_name(value)
elif key == "model_ws":
self.change_model_ws(value)
elif key == "sr" and value.__class__.__name__ == "SpatialReference":
warnings.warn(
"SpatialReference has been deprecated.",
category=DeprecationWarning,
)
if self.dis is not None:
self.dis.sr = value
else:
raise Exception(
"cannot set SpatialReference - ModflowDis not found"
)
elif key == "tr":
assert isinstance(
value, discretization.reference.TemporalReference
)
if self.dis is not None:
self.dis.tr = value
else:
raise Exception(
"cannot set TemporalReference - ModflowDis not found"
)
elif key == "start_datetime":
if self.dis is not None:
self.dis.start_datetime = value
self.tr.start_datetime = value
else:
raise Exception(
"cannot set start_datetime - ModflowDis not found"
)
else:
super().__setattr__(key, value)
def run_model(
self,
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
):
"""
This method will run the model using subprocess.Popen.
Parameters
----------
silent : boolean
Suppress echo of run information to the screen. (default is False)
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
by the method. (default is False).
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
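Examples
--------
A minimal sketch, assuming the model's executable can be found on
the system path:
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('model.nam')
>>> success, buff = ml.run_model(silent=True, report=True)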
"""
return run_model(
self.exe_name,
self.namefile,
model_ws=self.model_ws,
silent=silent,
pause=pause,
report=report,
normal_msg=normal_msg,
)
def load_results(self):
print("load_results not implemented")
return None
def write_input(self, SelPackList=False, check=False):
"""
Write the input.
Parameters
----------
SelPackList : False or list of packages
List of package names to write. If False (default), all packages
are written.
check : boolean
Flag to run the model-level check prior to writing input.
(default is False)
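Examples
--------
A minimal sketch, assuming ml is a fully populated model:
>>> ml.write_input()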
"""
if check:
# run check prior to writing input
self.check(f=f"{self.name}.chk", verbose=self.verbose, level=1)
# reset the model to free_format if parameter substitution was
# performed on a model load
if self.parameter_load and not self.free_format_input:
if self.verbose:
print(
"\nResetting free_format_input to True to "
"preserve the precision of the parameter data."
)
self.free_format_input = True
if self.verbose:
print("\nWriting packages:")
if SelPackList is False:
for p in self.packagelist:
if self.verbose:
print(" Package: ", p.name[0])
# prevent individual package checks from running after
# model-level package check above
# otherwise checks are run twice
# or the model level check procedure would have to be split up
# or each package would need a check argument,
# or default for package level check would have to be False
try:
p.write_file(check=False)
except TypeError:
p.write_file()
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(" Package: ", p.name[0])
try:
p.write_file(check=False)
except TypeError:
p.write_file()
break
if self.verbose:
print(" ")
# write name file
self.write_name_file()
# os.chdir(org_dir)
return
def write_name_file(self):
"""
Every model needs its own write_name_file method.
"""
raise Exception(
"IMPLEMENTATION ERROR: writenamefile must be overloaded"
)
def set_model_units(self):
"""
Every model needs its own set_model_units method
"""
raise Exception(
"IMPLEMENTATION ERROR: set_model_units must be overloaded"
)
@property
def name(self):
"""
Get model name
Returns
-------
name : str
name of model
"""
return copy.deepcopy(self.__name)
def add_pop_key_list(self, key):
"""
Add an external file unit number to a list that will be used to remove
model output (typically binary) files from ext_unit_dict.
Parameters
----------
key : int
file unit number
Returns
-------
Examples
--------
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
chk = utils.check(self, f=f, verbose=verbose, level=level)
# check for unit number conflicts
package_units = {}
duplicate_units = {}
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] != 0:
if p.unit_number[i] in package_units.values():
duplicate_units[p.name[i]] = p.unit_number[i]
otherpackage = [
k
for k, v in package_units.items()
if v == p.unit_number[i]
][0]
duplicate_units[otherpackage] = p.unit_number[i]
# record this unit so subsequent packages are checked against it
package_units[p.name[i]] = p.unit_number[i]
if len(duplicate_units) > 0:
for k, v in duplicate_units.items():
chk._add_to_summary(
"Error", package=k, value=v, desc="unit number conflict"
)
else:
chk.passed.append("Unit number conflicts")
return self._check(chk, level)
def plot(self, SelPackList=None, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
model input data
Parameters
----------
SelPackList : None or list of str
List of packages to plot. If SelPackList=None all packages
are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
MODFLOW zero-based layer number to return. If None, then all
layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return.
(default is zero)
key : str
MfList dictionary key. (default is None)
Returns
-------
axes : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis are returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_model_helper(
self, SelPackList=SelPackList, **kwargs
)
return axes
def to_shapefile(self, filename, package_names=None, **kwargs):
"""
Wrapper function for writing a shapefile for the model grid. If
package_names is not None, then search through the requested packages
looking for arrays that can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> m.to_shapefile('model.shp', package_names=['dis', 'lpf'])
"""
warnings.warn("to_shapefile() is deprecated. use .export()")
self.export(filename, package_names=package_names)
return
def run_model(
exe_name,
namefile,
model_ws="./",
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
use_async=False,
cargs=None,
):
"""
This function will run the model using subprocess.Popen. It
communicates with the model's stdout asynchronously and reports
progress to the screen with timestamps
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
Namefile of model to run. The namefile must be the
filename of the namefile without the path. Namefile can be None
to allow programs that do not require a control file (name file)
to be passed as a command line argument.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
silent : boolean
Suppress echo of run information to the screen. (default is False)
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
by the method. (default is False).
normal_msg : str or list
Normal termination message used to determine if the
run terminated normally. More than one message can be provided using
a list. (Default is 'normal termination')
use_async : boolean
Asynchronously read model stdout and report with timestamps. Useful
for models that take a long time to run; not recommended for models
that run very quickly.
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
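Examples
--------
A minimal sketch, assuming a MODFLOW executable named 'mf2005' is on
the system path and 'model.nam' exists in the current working
directory (both names are illustrative):
>>> from flopy.mbase import run_model
>>> success, buff = run_model('mf2005', 'model.nam', report=True)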
"""
success = False
buff = []
# convert normal_msg to a list of lower case str for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg]
normal_msg = [s.lower() for s in normal_msg]
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
import platform
if platform.system() == "Windows":
if not exe_name.lower().endswith(".exe"):
exe = which(exe_name + ".exe")
elif exe_name.lower().endswith(".exe"):
exe = which(exe_name[:-4])
if exe is None:
raise Exception(
f"The program {exe_name} does not exist or is not executable."
)
else:
if not silent:
print(
f"FloPy is using the following executable to run the model: {exe}"
)
if namefile is not None:
if not os.path.isfile(os.path.join(model_ws, namefile)):
raise Exception(
f"The namefile for this model does not exists: {namefile}"
)
# simple little function for the thread to target
def q_output(output, q):
for line in iter(output.readline, b""):
q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe_name]
if namefile is not None:
argv.append(namefile)
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
if not use_async:
while True:
line = proc.stdout.readline().decode("utf-8")
if line == "" and proc.poll() is not None:
break
if line:
for msg in normal_msg:
if msg in line.lower():
success = True
break
line = line.rstrip("\r\n")
if not silent:
print(line)
if report:
buff.append(line)
else:
break
return success, buff
# some tricks for the async stdout reading
q = Queue.Queue()
thread = threading.Thread(target=q_output, args=(proc.stdout, q))
thread.daemon = True
thread.start()
failed_words = ["fail", "error"]
last = datetime.now()
lastsec = 0.0
while True:
try:
line = q.get_nowait()
except Queue.Empty:
pass
else:
if line == "":
break
line = line.decode().lower().strip()
if line != "":
now = datetime.now()
dt = now - last
tsecs = dt.total_seconds() - lastsec
line = f"(elapsed:{tsecs})-->{line}"
lastsec = tsecs + lastsec
buff.append(line)
if not silent:
print(line)
for fword in failed_words:
if fword in line:
success = False
break
if proc.poll() is not None:
break
proc.wait()
thread.join(timeout=1)
buff.extend(proc.stdout.readlines())
proc.stdout.close()
for line in buff:
for msg in normal_msg:
if msg in line.lower():
print("success")
success = True
break
if pause:
input("Press Enter to continue...")
return success, buff
|
add_external
|
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
|
"""
mbase module
This module contains the base model class from which
all of the other models inherit.
"""
import abc
import os
import shutil
import threading
import warnings
import queue as Queue
from datetime import datetime
from shutil import which
from subprocess import Popen, PIPE, STDOUT
import copy
import numpy as np
from flopy import utils, discretization
from .version import __version__
from .discretization.grid import Grid
## Global variables
# Multiplier for individual array elements in integer and real arrays read by
# MODFLOW's U2DREL, U1DREL and U2DINT.
iconst = 1
# Printout flag. If >= 0 then array values read are printed in listing file.
iprn = -1
# external exceptions for users
class PackageLoadException(Exception):
"""
FloPy package load exception.
"""
def __init__(self, error, location=""):
"""Initialize exception."""
self.message = error
super().__init__(f"{error} ({location})")
class FileDataEntry:
def __init__(self, fname, unit, binflag=False, output=False, package=None):
self.fname = fname
self.unit = unit
self.binflag = binflag
self.output = output
self.package = package
class FileData:
def __init__(self):
self.file_data = []
return
def add_file(self, fname, unit, binflag=False, output=False, package=None):
ipop = []
for idx, file_data in enumerate(self.file_data):
if file_data.fname == fname or file_data.unit == unit:
ipop.append(idx)
# drop existing entries that clash on filename or unit number
for idx in reversed(ipop):
self.file_data.pop(idx)
self.file_data.append(
FileDataEntry(
fname, unit, binflag=binflag, output=output, package=package
)
)
return
class ModelInterface:
def __init__(self):
self._mg_resync = True
self._modelgrid = None
def update_modelgrid(self):
if self._modelgrid is not None:
self._modelgrid = Grid(
proj4=self._modelgrid.proj4,
xoff=self._modelgrid.xoffset,
yoff=self._modelgrid.yoffset,
angrot=self._modelgrid.angrot,
)
self._mg_resync = True
@property
@abc.abstractmethod
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
@abc.abstractmethod
def packagelist(self):
raise NotImplementedError(
"must define packagelist in child class to use this base class"
)
@property
@abc.abstractmethod
def namefile(self):
raise NotImplementedError(
"must define namefile in child class to use this base class"
)
@property
@abc.abstractmethod
def model_ws(self):
raise NotImplementedError(
"must define model_ws in child class to use this base class"
)
@property
@abc.abstractmethod
def exename(self):
raise NotImplementedError(
"must define exename in child class to use this base class"
)
@property
@abc.abstractmethod
def version(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@property
@abc.abstractmethod
def solver_tols(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@abc.abstractmethod
def export(self, f, **kwargs):
raise NotImplementedError(
"must define export in child class to use this base class"
)
@property
@abc.abstractmethod
def laytyp(self):
raise NotImplementedError(
"must define laytyp in child class to use this base class"
)
@property
@abc.abstractmethod
def hdry(self):
raise NotImplementedError(
"must define hdry in child class to use this base class"
)
@property
@abc.abstractmethod
def hnoflo(self):
raise NotImplementedError(
"must define hnoflo in child class to use this base class"
)
@property
@abc.abstractmethod
def laycbd(self):
raise NotImplementedError(
"must define laycbd in child class to use this base class"
)
@property
@abc.abstractmethod
def verbose(self):
raise NotImplementedError(
"must define verbose in child class to use this base class"
)
@abc.abstractmethod
def check(self, f=None, verbose=True, level=1):
raise NotImplementedError(
"must define check in child class to use this base class"
)
def get_package_list(self, ftype=None):
"""
Get a list of all the package names.
Parameters
----------
ftype : str
Type of package, 'RIV', 'LPF', etc.
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
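Examples
--------
A sketch; the names returned depend on the packages in the model:
>>> ml.get_package_list()
['DIS', 'BAS6', 'LPF', 'WEL']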
"""
val = []
for pp in self.packagelist:
if ftype is None:
val.append(pp.name[0].upper())
elif pp.package_type.lower() == ftype:
val.append(pp.name[0].upper())
return val
def _check(self, chk, level=1):
"""
Check model data for common errors.
Parameters
----------
chk : check object
Model-level check instance, used to accumulate package-level
results.
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
results = {}
for p in self.packagelist:
if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
results[p.name[0]] = p.check(
f=None,
verbose=False,
level=level - 1,
checktype=chk.__class__,
)
# model level checks
# solver check
if self.version in chk.solver_packages.keys():
solvers = set(chk.solver_packages[self.version]).intersection(
set(self.get_package_list())
)
if not solvers:
chk._add_to_summary(
"Error", desc="\r No solver package", package="model"
)
elif len(list(solvers)) > 1:
for s in solvers:
chk._add_to_summary(
"Error",
desc="\r Multiple solver packages",
package=s,
)
else:
chk.passed.append("Compatible solver package")
# add package check results to model level check summary
for r in results.values():
if (
r is not None and r.summary_array is not None
): # currently SFR doesn't have one
chk.summary_array = np.append(
chk.summary_array, r.summary_array
).view(np.recarray)
chk.passed += [
f"{r.package.name[0]} package: {psd}" for psd in r.passed
]
chk.summarize()
return chk
class BaseModel(ModelInterface):
"""
MODFLOW-based models base class.
Parameters
----------
modelname : str, default "modflowtest"
Name of the model, which is also used for model file names.
namefile_ext : str, default "nam"
Name file extension, without "."
exe_name : str, default "mf2k.exe"
Name of the modflow executable.
model_ws : str, optional
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
structured : bool, default True
Specify if model grid is structured (default) or unstructured.
verbose : bool, default False
Print additional information to the screen.
**kwargs : dict, optional
Used to define: ``xll``/``yll`` for the x- and y-coordinates of
the lower-left corner of the grid, ``xul``/``yul`` for the
x- and y-coordinates of the upper-left corner of the grid
(deprecated), ``rotation`` for the grid rotation (default 0.0),
``proj4_str`` for a PROJ string, and ``start_datetime`` for
model start date (default "1-1-1970").
"""
def __init__(
self,
modelname="modflowtest",
namefile_ext="nam",
exe_name="mf2k.exe",
model_ws=None,
structured=True,
verbose=False,
**kwargs,
):
"""Initialize BaseModel."""
super().__init__()
self.__name = modelname
self.namefile_ext = namefile_ext or ""
self._namefile = self.__name + "." + self.namefile_ext
self._packagelist = []
self.heading = ""
self.exe_name = exe_name
self._verbose = verbose
self.external_path = None
self.external_extension = "ref"
if model_ws is None:
model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
except OSError:
print(
f"\n{model_ws} not valid, "
f"workspace-folder was changed to {os.getcwd()}\n"
)
model_ws = os.getcwd()
self._model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ""
# check for reference info in kwargs
# we are just carrying these until a dis package is added
xll = kwargs.pop("xll", None)
yll = kwargs.pop("yll", None)
self._xul = kwargs.pop("xul", None)
self._yul = kwargs.pop("yul", None)
self._rotation = kwargs.pop("rotation", 0.0)
self._proj4_str = kwargs.pop("proj4_str", None)
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
# build model discretization objects
self._modelgrid = Grid(
proj4=self._proj4_str,
xoff=xll,
yoff=yll,
angrot=self._rotation,
)
self._modeltime = None
# Model file information
self.__onunit__ = 10
# external option stuff
self.array_free_format = True
self.free_format_input = True
self.parameter_load = False
self.array_format = None
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external_output = []
self.package_units = []
self._next_ext_unit = None
# output files
self.output_fnames = []
self.output_units = []
self.output_binflag = []
self.output_packages = []
return
@property
def modeltime(self):
raise NotImplementedError(
"must define modeltime in child class to use this base class"
)
@property
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
def packagelist(self):
return self._packagelist
@packagelist.setter
def packagelist(self, packagelist):
self._packagelist = packagelist
@property
def namefile(self):
return self._namefile
@namefile.setter
def namefile(self, namefile):
self._namefile = namefile
@property
def model_ws(self):
return self._model_ws
@model_ws.setter
def model_ws(self, model_ws):
self._model_ws = model_ws
@property
def exename(self):
return self._exename
@exename.setter
def exename(self, exename):
self._exename = exename
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, verbose):
self._verbose = verbose
@property
def laytyp(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").laytyp.array
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").laycon.array
if self.get_package("UPW") is not None:
return self.get_package("UPW").laytyp.array
return None
@property
def hdry(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").hdry
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").hdry
if self.get_package("UPW") is not None:
return self.get_package("UPW").hdry
return None
@property
def hnoflo(self):
try:
bas6 = self.get_package("BAS6")
return bas6.hnoflo
except AttributeError:
return None
@property
def laycbd(self):
try:
dis = self.get_package("DIS")
return dis.laycbd.array
except AttributeError:
return None
# we don't need these - no need for controlled access to array_free_format
# def set_free_format(self, value=True):
# """
# Set the free format flag for the model instance
#
# Parameters
# ----------
# value : bool
# Boolean value to set free format flag for model. (default is True)
#
# Returns
# -------
#
# """
# if not isinstance(value, bool):
# print('Error: set_free_format passed value must be a boolean')
# return False
# self.array_free_format = value
#
# def get_free_format(self):
# """
# Return the free format flag for the model
#
# Returns
# -------
# out : bool
# Free format flag for the model
#
# """
# return self.array_free_format
def next_unit(self, i=None):
if i is not None:
self.__onunit__ = i - 1
else:
self.__onunit__ += 1
return self.__onunit__
def next_ext_unit(self):
"""
Function to encapsulate next_ext_unit attribute
"""
next_unit = self._next_ext_unit + 1
self._next_ext_unit += 1
return next_unit
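    # A minimal sketch (hypothetical counter values) of how the unit
    # counters above behave; next_unit() walks an internal counter that
    # starts at 10, while next_ext_unit() requires _next_ext_unit to have
    # been set first:
    #
    #     m.next_unit()       # -> 11
    #     m.next_unit(50)     # -> 49; the next plain call returns 50
    #     m._next_ext_unit = 1000
    #     m.next_ext_unit()   # -> 1001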
def export(self, f, **kwargs):
"""
Method to export a model to netcdf or shapefile based on the
extension of the file name (.shp for shapefile, .nc for netcdf)
Parameters
----------
f : str
filename
kwargs : keyword arguments
modelgrid : flopy.discretization.Grid instance
user supplied modelgrid which can be used for exporting
in lieu of the modelgrid associated with the model object
Returns
-------
None or Netcdf object
"""
from .export import utils
return utils.model_export(f, self, **kwargs)
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
"""
for idx, u in enumerate(p.unit_number):
if u != 0:
if u in self.package_units or u in self.external_units:
try:
pn = p.name[idx]
                    except (IndexError, TypeError):
pn = p.name
if self.verbose:
print(
f"\nWARNING:\n unit {u} of package {pn} already in use."
)
self.package_units.append(u)
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif isinstance(p, type(pp)):
if self.verbose:
print(
"\nWARNING:\n Two packages of the same type, "
f"Replacing existing '{p.name[0]}' package."
)
self.packagelist[i] = p
return
if self.verbose:
print("adding Package: ", p.name[0])
self.packagelist.append(p)
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
"""
for i, pp in enumerate(self.packagelist):
if pname.upper() in pp.name:
if self.verbose:
print("removing Package: ", pp.name)
# Remove the package object from the model's packagelist
p = self.packagelist.pop(i)
# Remove the package unit number from the list of package
# units stored with the model
for iu in p.unit_number:
if iu in self.package_units:
self.package_units.remove(iu)
return
raise StopIteration(
"Package name " + pname + " not found in Package list"
)
def __getattr__(self, item):
"""
__getattr__ - syntactic sugar
Parameters
----------
        item : str
            3 character package name (case insensitive). The special names
            "tr", "nper", and "start_datetime" are delegated to the
            ModflowDis object, if present.
        Returns
        -------
        pp : Package object
            Package object of type :class:`flopy.pakbase.Package`
        Note
        ----
        if self.dis is not None, "tr", "nper", and "start_datetime" are
        taken from the ModflowDis object before being returned
"""
if item == "output_packages" or not hasattr(self, "output_packages"):
raise AttributeError(item)
if item == "tr":
if self.dis is not None:
return self.dis.tr
else:
return None
if item == "nper":
if self.dis is not None:
return self.dis.nper
else:
return 0
if item == "start_datetime":
if self.dis is not None:
return self.dis.start_datetime
else:
return None
# return self.get_package(item)
# to avoid infinite recursion
if item == "_packagelist" or item == "packagelist":
raise AttributeError(item)
pckg = self.get_package(item)
if pckg is not None or item in self.mfnam_packages:
return pckg
if item == "modelgrid":
return
raise AttributeError(item)
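    # A short usage sketch (hypothetical subclass instance `m`) of the
    # attribute-style access implemented above; package names resolve
    # through get_package(), and a few dis-related names are forwarded:
    #
    #     dis = m.dis               # equivalent to m.get_package("DIS")
    #     nper = m.nper             # m.dis.nper, or 0 if there is no dis
    #     start = m.start_datetime  # m.dis.start_datetime, or None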
def get_ext_dict_attr(
self, ext_unit_dict=None, unit=None, filetype=None, pop_key=True
):
iu = None
fname = None
if ext_unit_dict is not None:
for key, value in ext_unit_dict.items():
if key == unit:
iu = key
fname = os.path.basename(value.filename)
break
elif value.filetype == filetype:
iu = key
fname = os.path.basename(value.filename)
if pop_key:
self.add_pop_key_list(iu)
break
return iu, fname
def _output_msg(self, i, add=True):
if add:
txt1 = "Adding"
txt2 = "to"
else:
txt1 = "Removing"
txt2 = "from"
print(
f"{txt1} {self.output_fnames[i]} (unit={self.output_units[i]}) "
f"{txt2} the output list."
)
def add_output_file(
self, unit, fname=None, extension="cbc", binflag=True, package=None
):
"""
Add an ascii or binary output file for a package
Parameters
----------
unit : int
unit number of external array
fname : str
filename of external array. (default is None)
extension : str
extension to use for the cell-by-cell file. Only used if fname
is None. (default is cbc)
binflag : bool
boolean flag indicating if the output file is a binary file.
Default is True
package : str
string that defines the package the output file is attached to.
Default is None
"""
add_cbc = False
if unit > 0:
add_cbc = True
# determine if the file is in external_units
if abs(unit) in self.external_units:
idx = self.external_units.index(abs(unit))
if fname is None:
fname = os.path.basename(self.external_fnames[idx])
binflag = self.external_binflag[idx]
self.remove_external(unit=abs(unit))
# determine if the unit exists in the output data
if abs(unit) in self.output_units:
add_cbc = False
idx = self.output_units.index(abs(unit))
# determine if binflag has changed
if binflag is not self.output_binflag[idx]:
add_cbc = True
if add_cbc:
self.remove_output(unit=abs(unit))
else:
if package is not None:
self.output_packages[idx].append(package)
if add_cbc:
if fname is None:
fname = f"{self.name}.{extension}"
# check if this file name exists for a different unit number
if fname in self.output_fnames:
idx = self.output_fnames.index(fname)
iut = self.output_units[idx]
if iut != unit:
# include unit number in fname if package has
# not been passed
if package is None:
fname = f"{self.name}.{unit}.{extension}"
# include package name in fname
else:
fname = f"{self.name}.{package}.{extension}"
else:
fname = os.path.basename(fname)
self.add_output(fname, unit, binflag=binflag, package=package)
return
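    # A hedged example of the logic above (hypothetical unit number): a new
    # positive unit registers a cell-by-cell file, while repeating the same
    # unit with an unchanged binflag only appends the package name:
    #
    #     m.add_output_file(53, package="LPF")  # adds "<name>.cbc" on unit 53
    #     m.add_output_file(53, package="WEL")  # reuses entry; appends "WEL"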
def add_output(self, fname, unit, binflag=False, package=None):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.output_fnames:
if self.verbose:
print(
"BaseModel.add_output() warning: "
f"replacing existing filename {fname}"
)
idx = self.output_fnames.index(fname)
if self.verbose:
self._output_msg(idx, add=False)
self.output_fnames.pop(idx)
self.output_units.pop(idx)
self.output_binflag.pop(idx)
self.output_packages.pop(idx)
self.output_fnames.append(fname)
self.output_units.append(unit)
self.output_binflag.append(binflag)
if package is not None:
self.output_packages.append([package])
else:
self.output_packages.append([])
if self.verbose:
self._output_msg(-1, add=True)
return
def remove_output(self, fname=None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
else:
msg = " either fname or unit must be passed to remove_output()"
raise Exception(msg)
return
def get_output(self, fname=None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
return self.output_units[i]
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
return self.output_fnames[i]
return None
else:
msg = " either fname or unit must be passed to get_output()"
raise Exception(msg)
return
def set_output_attribute(self, fname=None, unit=None, attr=None):
"""
Set a variable in an output file from the model by specifying either
the file name or the unit number and a dictionary with attributes
to change.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
msg = (
" either fname or unit must be passed "
"to set_output_attribute()"
)
raise Exception(msg)
if attr is not None:
if idx is not None:
                for key, value in attr.items():
if key == "binflag":
self.output_binflag[idx] = value
elif key == "fname":
self.output_fnames[idx] = value
elif key == "unit":
self.output_units[idx] = value
return
def get_output_attribute(self, fname=None, unit=None, attr=None):
"""
        Get an attribute for an output file from the model by specifying either
the file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
" either fname or unit must be passed "
"to set_output_attribute()"
)
v = None
if attr is not None:
if idx is not None:
if attr == "binflag":
v = self.output_binflag[idx]
elif attr == "fname":
v = self.output_fnames[idx]
elif attr == "unit":
v = self.output_units[idx]
return v
# MASKED: add_external function (lines 969-1010)
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
"""
plist = []
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
plist.append(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
plist.append(i)
else:
msg = " either fname or unit must be passed to remove_external()"
raise Exception(msg)
# remove external file
j = 0
for i in plist:
ipos = i - j
self.external_fnames.pop(ipos)
self.external_units.pop(ipos)
self.external_binflag.pop(ipos)
self.external_output.pop(ipos)
j += 1
return
def add_existing_package(
self, filename, ptype=None, copy_to_model_ws=True
):
"""
Add an existing package to a model instance.
Parameters
----------
filename : str
the name of the file to add as a package
ptype : optional
the model package type (e.g. "lpf", "wel", etc). If None,
then the file extension of the filename arg is used
copy_to_model_ws : bool
flag to copy the package file into the model_ws directory.
Returns
-------
None
"""
if ptype is None:
ptype = filename.split(".")[-1]
ptype = str(ptype).upper()
# for pak in self.packagelist:
# if ptype in pak.name:
# print("BaseModel.add_existing_package() warning: " +\
# "replacing existing package {0}".format(ptype))
class Obj:
pass
fake_package = Obj()
fake_package.write_file = lambda: None
fake_package.name = [ptype]
fake_package.extension = [filename.split(".")[-1]]
fake_package.unit_number = [self.next_ext_unit()]
if copy_to_model_ws:
base_filename = os.path.split(filename)[-1]
fake_package.file_name = [base_filename]
shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
else:
fake_package.file_name = [filename]
fake_package.allowDuplicates = True
self.add_package(fake_package)
def get_name_file_entries(self):
"""
Get a string representation of the name file.
Parameters
----------
"""
lines = []
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s = f"{p.name[i]:14s} {p.unit_number[i]:5d} {p.file_name[i]}"
lines.append(s)
return "\n".join(lines) + "\n"
def has_package(self, name):
"""
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
True if package name exists, otherwise False if not found.
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for p in self.packagelist:
for pn in p.name:
if pn.upper() == name:
return True
return False
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for pp in self.packagelist:
if pp.name[0].upper() == name:
return pp
return None
def set_version(self, version):
self.version = version.lower()
# check that this is a valid model version
if self.version not in list(self.version_types.keys()):
err = (
f"Error: Unsupported model version ({self.version}). "
"Valid model versions are:"
)
for v in list(self.version_types.keys()):
err += f" {v}"
raise Exception(err)
# set namefile heading
self.heading = (
f"# Name file for {self.version_types[self.version]}, "
f"generated by Flopy version {__version__}."
)
# set heading for each package
for p in self.get_package_list():
pak = self.get_package(p)
if hasattr(pak, "heading"):
pak._generate_heading()
return None
def change_model_ws(self, new_pth=None, reset_external=False):
"""
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
the present working directory).
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
"""
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
print(f"\ncreating model workspace...\n {new_pth}")
os.makedirs(new_pth)
            except OSError:
raise OSError(f"{new_pth} not valid, workspace-folder")
# line = '\n{} not valid, workspace-folder '.format(new_pth) + \
# 'was changed to {}\n'.format(os.getcwd())
# print(line)
# new_pth = os.getcwd()
# --reset the model workspace
old_pth = self._model_ws
self._model_ws = new_pth
if self.verbose:
print(f"\nchanging model workspace...\n {new_pth}")
# reset the paths for each package
for pp in self.packagelist:
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
# create the external path (if needed)
if (
hasattr(self, "external_path")
and self.external_path is not None
and not os.path.exists(
os.path.join(self._model_ws, self.external_path)
)
):
pth = os.path.join(self._model_ws, self.external_path)
os.makedirs(pth)
if reset_external:
self._reset_external(pth, old_pth)
elif reset_external:
self._reset_external(self._model_ws, old_pth)
return None
def _reset_external(self, pth, old_pth):
new_ext_fnames = []
for ext_file, output in zip(
self.external_fnames, self.external_output
):
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
# this is a wicked mess
if output:
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
new_ext_file = ext_file
else:
# fpth = os.path.abspath(os.path.join(old_pth, ext_file))
# new_ext_file = os.path.relpath(fpth, os.path.abspath(pth))
fdir = os.path.dirname(ext_file)
if fdir == "":
fpth = os.path.abspath(os.path.join(old_pth, ext_file))
else:
fpth = ext_file
ao = os.path.abspath(os.path.dirname(fpth))
ep = os.path.abspath(pth)
relp = os.path.relpath(ao, ep)
new_ext_file = os.path.join(relp, os.path.basename(ext_file))
new_ext_fnames.append(new_ext_file)
self.external_fnames = new_ext_fnames
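    # NOTE: the property below redefines model_ws (first declared with a
    # setter near the top of the class) and returns a deep copy; direct
    # assignment still works because __setattr__ routes "model_ws" to
    # change_model_ws().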
@property
def model_ws(self):
return copy.deepcopy(self._model_ws)
def _set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = str(value)
self.namefile = self.__name + "." + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + "." + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
def __setattr__(self, key, value):
if key == "free_format_input":
# if self.bas6 is not None:
# self.bas6.ifrefm = value
super().__setattr__(key, value)
elif key == "name":
self._set_name(value)
elif key == "model_ws":
self.change_model_ws(value)
elif key == "sr" and value.__class__.__name__ == "SpatialReference":
warnings.warn(
"SpatialReference has been deprecated.",
category=DeprecationWarning,
)
if self.dis is not None:
self.dis.sr = value
else:
raise Exception(
"cannot set SpatialReference - ModflowDis not found"
)
elif key == "tr":
assert isinstance(
value, discretization.reference.TemporalReference
)
if self.dis is not None:
self.dis.tr = value
else:
raise Exception(
"cannot set TemporalReference - ModflowDis not found"
)
elif key == "start_datetime":
if self.dis is not None:
self.dis.start_datetime = value
self.tr.start_datetime = value
else:
raise Exception(
"cannot set start_datetime - ModflowDis not found"
)
else:
super().__setattr__(key, value)
def run_model(
self,
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
):
"""
This method will run the model using subprocess.Popen.
Parameters
----------
        silent : boolean
            If True, suppress echoing of run information to the screen
            (default is False).
        pause : boolean, optional
            Pause upon completion (default is False).
        report : boolean, optional
            Save stdout lines to a list (buff) which is returned
            by the method (default is False).
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
return run_model(
self.exe_name,
self.namefile,
model_ws=self.model_ws,
silent=silent,
pause=pause,
report=report,
normal_msg=normal_msg,
)
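    # A hedged usage sketch (assumes a complete, written model `m` and a
    # MODFLOW executable on the PATH):
    #
    #     m.write_input()
    #     success, buff = m.run_model(silent=True, report=True)
    #     if not success:
    #         print("\n".join(buff))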
def load_results(self):
print("load_results not implemented")
return None
def write_input(self, SelPackList=False, check=False):
"""
Write the input.
Parameters
----------
SelPackList : False or list of packages
"""
if check:
# run check prior to writing input
self.check(f=f"{self.name}.chk", verbose=self.verbose, level=1)
# reset the model to free_format if parameter substitution was
# performed on a model load
if self.parameter_load and not self.free_format_input:
if self.verbose:
print(
"\nResetting free_format_input to True to "
"preserve the precision of the parameter data."
)
self.free_format_input = True
if self.verbose:
print("\nWriting packages:")
        if SelPackList is False:
for p in self.packagelist:
if self.verbose:
print(" Package: ", p.name[0])
# prevent individual package checks from running after
# model-level package check above
# otherwise checks are run twice
# or the model level check procedure would have to be split up
# or each package would need a check argument,
# or default for package level check would have to be False
try:
p.write_file(check=False)
except TypeError:
p.write_file()
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(" Package: ", p.name[0])
try:
p.write_file(check=False)
except TypeError:
p.write_file()
break
if self.verbose:
print(" ")
# write name file
self.write_name_file()
# os.chdir(org_dir)
return
def write_name_file(self):
"""
        Every model needs its own write_name_file method
"""
raise Exception(
"IMPLEMENTATION ERROR: writenamefile must be overloaded"
)
def set_model_units(self):
"""
Every model needs its own set_model_units method
"""
raise Exception(
"IMPLEMENTATION ERROR: set_model_units must be overloaded"
)
@property
def name(self):
"""
Get model name
Returns
-------
name : str
name of model
"""
return copy.deepcopy(self.__name)
def add_pop_key_list(self, key):
"""
Add a external file unit number to a list that will be used to remove
model output (typically binary) files from ext_unit_dict.
Parameters
----------
key : int
file unit number
Returns
-------
Examples
--------
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
chk = utils.check(self, f=f, verbose=verbose, level=level)
# check for unit number conflicts
package_units = {}
duplicate_units = {}
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] != 0:
if p.unit_number[i] in package_units.values():
duplicate_units[p.name[i]] = p.unit_number[i]
otherpackage = [
k
for k, v in package_units.items()
if v == p.unit_number[i]
][0]
duplicate_units[otherpackage] = p.unit_number[i]
if len(duplicate_units) > 0:
for k, v in duplicate_units.items():
chk._add_to_summary(
"Error", package=k, value=v, desc="unit number conflict"
)
else:
chk.passed.append("Unit number conflicts")
return self._check(chk, level)
def plot(self, SelPackList=None, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
model input data
Parameters
----------
SelPackList : bool or list
            List of packages to plot. If SelPackList=None all packages
are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
                MODFLOW zero-based layer number to return. If None, then
                all layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return.
(default is zero)
key : str
MfList dictionary key. (default is None)
Returns
----------
axes : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis are returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_model_helper(
self, SelPackList=SelPackList, **kwargs
)
return axes
def to_shapefile(self, filename, package_names=None, **kwargs):
"""
Wrapper function for writing a shapefile for the model grid. If
package_names is not None, then search through the requested packages
looking for arrays that can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
        >>> m.to_shapefile('model.shp', package_names=['dis', 'lpf'])
"""
warnings.warn("to_shapefile() is deprecated. use .export()")
self.export(filename, package_names=package_names)
return
def run_model(
exe_name,
namefile,
model_ws="./",
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
use_async=False,
cargs=None,
):
"""
    This function will run the model using subprocess.Popen. If
    use_async=True, it communicates with the model's stdout asynchronously
    and reports progress to the screen with timestamps.
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
Namefile of model to run. The namefile must be the
filename of the namefile without the path. Namefile can be None
to allow programs that do not require a control file (name file)
to be passed as a command line argument.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
    silent : boolean
        If True, suppress echoing of run information to the screen
        (default is False).
    pause : boolean, optional
        Pause upon completion (default is False).
    report : boolean, optional
        Save stdout lines to a list (buff) which is returned
        by the method (default is False).
normal_msg : str or list
Normal termination message used to determine if the
run terminated normally. More than one message can be provided using
a list. (Default is 'normal termination')
    use_async : boolean
        Asynchronously read model stdout and report with timestamps; useful
        for models that take a long time to run, but not for models that
        run very quickly. (default is False)
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
success = False
buff = []
# convert normal_msg to a list of lower case str for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg]
for idx, s in enumerate(normal_msg):
normal_msg[idx] = s.lower()
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
import platform
if platform.system() in "Windows":
if not exe_name.lower().endswith(".exe"):
exe = which(exe_name + ".exe")
elif exe_name.lower().endswith(".exe"):
exe = which(exe_name[:-4])
if exe is None:
raise Exception(
f"The program {exe_name} does not exist or is not executable."
)
else:
if not silent:
print(
f"FloPy is using the following executable to run the model: {exe}"
)
if namefile is not None:
if not os.path.isfile(os.path.join(model_ws, namefile)):
raise Exception(
f"The namefile for this model does not exists: {namefile}"
)
# simple little function for the thread to target
def q_output(output, q):
for line in iter(output.readline, b""):
q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe_name]
if namefile is not None:
argv.append(namefile)
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
if not use_async:
while True:
line = proc.stdout.readline().decode("utf-8")
if line == "" and proc.poll() is not None:
break
if line:
for msg in normal_msg:
if msg in line.lower():
success = True
break
line = line.rstrip("\r\n")
if not silent:
print(line)
if report:
buff.append(line)
else:
break
return success, buff
# some tricks for the async stdout reading
q = Queue.Queue()
thread = threading.Thread(target=q_output, args=(proc.stdout, q))
thread.daemon = True
thread.start()
failed_words = ["fail", "error"]
last = datetime.now()
lastsec = 0.0
while True:
try:
line = q.get_nowait()
except Queue.Empty:
pass
else:
if line == "":
break
line = line.decode().lower().strip()
if line != "":
now = datetime.now()
dt = now - last
tsecs = dt.total_seconds() - lastsec
line = f"(elapsed:{tsecs})-->{line}"
lastsec = tsecs + lastsec
buff.append(line)
if not silent:
print(line)
for fword in failed_words:
if fword in line:
success = False
break
if proc.poll() is not None:
break
proc.wait()
thread.join(timeout=1)
    # decode any remaining stdout so the normal_msg scan below compares
    # strings with strings
    buff.extend(line.decode("utf-8") for line in proc.stdout.readlines())
proc.stdout.close()
for line in buff:
for msg in normal_msg:
if msg in line.lower():
print("success")
success = True
break
if pause:
input("Press Enter to continue...")
return success, buff
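# A hedged usage sketch of run_model() (hypothetical file names; assumes a
# MODFLOW executable such as mf2005 is on the PATH):
#
#     success, buff = run_model("mf2005", "model.nam", model_ws="sim",
#                               report=True)
#     if success:
#         print("model terminated normally")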
|
def add_external(self, fname, unit, binflag=False, output=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.external_fnames:
if self.verbose:
print(
"BaseModel.add_external() warning: "
f"replacing existing filename {fname}"
)
idx = self.external_fnames.index(fname)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
if unit in self.external_units:
if self.verbose:
msg = f"BaseModel.add_external() warning: replacing existing unit {unit}"
print(msg)
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
self.external_output.append(output)
return
| 969
| 1010
|
"""
mbase module
This module contains the base model class from which
all other models inherit.
"""
import abc
import os
import shutil
import threading
import warnings
import queue as Queue
from datetime import datetime
from shutil import which
from subprocess import Popen, PIPE, STDOUT
import copy
import numpy as np
from flopy import utils, discretization
from .version import __version__
from .discretization.grid import Grid
## Global variables
# Multiplier for individual array elements in integer and real arrays read by
# MODFLOW's U2DREL, U1DREL and U2DINT.
iconst = 1
# Printout flag. If >= 0 then array values read are printed in listing file.
iprn = -1
# external exceptions for users
class PackageLoadException(Exception):
"""
FloPy package load exception.
"""
def __init__(self, error, location=""):
"""Initialize exception."""
self.message = error
super().__init__(f"{error} ({location})")
class FileDataEntry:
def __init__(self, fname, unit, binflag=False, output=False, package=None):
self.fname = fname
self.unit = unit
self.binflag = binflag
self.output = output
self.package = package
class FileData:
def __init__(self):
self.file_data = []
return
def add_file(self, fname, unit, binflag=False, output=False, package=None):
ipop = []
for idx, file_data in enumerate(self.file_data):
if file_data.fname == fname or file_data.unit == unit:
ipop.append(idx)
self.file_data.append(
FileDataEntry(
fname, unit, binflag=binflag, output=output, package=package
)
)
return
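    # NOTE: add_file() collects the indices of entries sharing fname or
    # unit in ipop but never removes them, so duplicate entries are
    # currently appended alongside the existing ones.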
class ModelInterface:
def __init__(self):
self._mg_resync = True
self._modelgrid = None
def update_modelgrid(self):
if self._modelgrid is not None:
self._modelgrid = Grid(
proj4=self._modelgrid.proj4,
xoff=self._modelgrid.xoffset,
yoff=self._modelgrid.yoffset,
angrot=self._modelgrid.angrot,
)
self._mg_resync = True
@property
@abc.abstractmethod
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
@abc.abstractmethod
def packagelist(self):
raise NotImplementedError(
"must define packagelist in child class to use this base class"
)
@property
@abc.abstractmethod
def namefile(self):
raise NotImplementedError(
"must define namefile in child class to use this base class"
)
@property
@abc.abstractmethod
def model_ws(self):
raise NotImplementedError(
"must define model_ws in child class to use this base class"
)
@property
@abc.abstractmethod
def exename(self):
raise NotImplementedError(
"must define exename in child class to use this base class"
)
@property
@abc.abstractmethod
def version(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@property
@abc.abstractmethod
def solver_tols(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@abc.abstractmethod
def export(self, f, **kwargs):
raise NotImplementedError(
"must define export in child class to use this base class"
)
@property
@abc.abstractmethod
def laytyp(self):
raise NotImplementedError(
"must define laytyp in child class to use this base class"
)
@property
@abc.abstractmethod
def hdry(self):
raise NotImplementedError(
"must define hdry in child class to use this base class"
)
@property
@abc.abstractmethod
def hnoflo(self):
raise NotImplementedError(
"must define hnoflo in child class to use this base class"
)
@property
@abc.abstractmethod
def laycbd(self):
raise NotImplementedError(
"must define laycbd in child class to use this base class"
)
@property
@abc.abstractmethod
def verbose(self):
raise NotImplementedError(
"must define verbose in child class to use this base class"
)
@abc.abstractmethod
def check(self, f=None, verbose=True, level=1):
raise NotImplementedError(
"must define check in child class to use this base class"
)
def get_package_list(self, ftype=None):
"""
Get a list of all the package names.
Parameters
----------
ftype : str
Type of package, 'RIV', 'LPF', etc.
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
"""
val = []
for pp in self.packagelist:
if ftype is None:
val.append(pp.name[0].upper())
elif pp.package_type.lower() == ftype:
val.append(pp.name[0].upper())
return val
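    # A short usage sketch (hypothetical loaded model `m`):
    #
    #     m.get_package_list()       # e.g. ["DIS", "BAS6", "LPF", "PCG"]
    #     m.get_package_list("lpf")  # e.g. ["LPF"]; ftype is lowercase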
def _check(self, chk, level=1):
"""
Check model data for common errors.
Parameters
----------
        chk : check object
            Check instance to which model-level and package-level check
            results are appended.
        level : int
            Check method analysis level. If level=0, summary checks are
            performed. If level=1, full checks are performed.
        Returns
        -------
        chk : check object
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
results = {}
for p in self.packagelist:
if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
results[p.name[0]] = p.check(
f=None,
verbose=False,
level=level - 1,
checktype=chk.__class__,
)
# model level checks
# solver check
if self.version in chk.solver_packages.keys():
solvers = set(chk.solver_packages[self.version]).intersection(
set(self.get_package_list())
)
if not solvers:
chk._add_to_summary(
"Error", desc="\r No solver package", package="model"
)
elif len(list(solvers)) > 1:
for s in solvers:
chk._add_to_summary(
"Error",
desc="\r Multiple solver packages",
package=s,
)
else:
chk.passed.append("Compatible solver package")
# add package check results to model level check summary
for r in results.values():
if (
r is not None and r.summary_array is not None
): # currently SFR doesn't have one
chk.summary_array = np.append(
chk.summary_array, r.summary_array
).view(np.recarray)
chk.passed += [
f"{r.package.name[0]} package: {psd}" for psd in r.passed
]
chk.summarize()
return chk
class BaseModel(ModelInterface):
"""
MODFLOW-based models base class.
Parameters
----------
modelname : str, default "modflowtest"
Name of the model, which is also used for model file names.
namefile_ext : str, default "nam"
Name file extension, without "."
exe_name : str, default "mf2k.exe"
Name of the modflow executable.
model_ws : str, optional
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
structured : bool, default True
Specify if model grid is structured (default) or unstructured.
verbose : bool, default False
Print additional information to the screen.
**kwargs : dict, optional
Used to define: ``xll``/``yll`` for the x- and y-coordinates of
the lower-left corner of the grid, ``xul``/``yul`` for the
x- and y-coordinates of the upper-left corner of the grid
(deprecated), ``rotation`` for the grid rotation (default 0.0),
``proj4_str`` for a PROJ string, and ``start_datetime`` for
model start date (default "1-1-1970").
"""
def __init__(
self,
modelname="modflowtest",
namefile_ext="nam",
exe_name="mf2k.exe",
model_ws=None,
structured=True,
verbose=False,
**kwargs,
):
"""Initialize BaseModel."""
super().__init__()
self.__name = modelname
self.namefile_ext = namefile_ext or ""
self._namefile = self.__name + "." + self.namefile_ext
self._packagelist = []
self.heading = ""
self.exe_name = exe_name
self._verbose = verbose
self.external_path = None
self.external_extension = "ref"
if model_ws is None:
model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
            except OSError:
print(
f"\n{model_ws} not valid, "
f"workspace-folder was changed to {os.getcwd()}\n"
)
model_ws = os.getcwd()
self._model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ""
# check for reference info in kwargs
# we are just carrying these until a dis package is added
xll = kwargs.pop("xll", None)
yll = kwargs.pop("yll", None)
self._xul = kwargs.pop("xul", None)
self._yul = kwargs.pop("yul", None)
self._rotation = kwargs.pop("rotation", 0.0)
self._proj4_str = kwargs.pop("proj4_str", None)
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
# build model discretization objects
self._modelgrid = Grid(
proj4=self._proj4_str,
xoff=xll,
yoff=yll,
angrot=self._rotation,
)
self._modeltime = None
# Model file information
self.__onunit__ = 10
# external option stuff
self.array_free_format = True
self.free_format_input = True
self.parameter_load = False
self.array_format = None
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external_output = []
self.package_units = []
self._next_ext_unit = None
# output files
self.output_fnames = []
self.output_units = []
self.output_binflag = []
self.output_packages = []
return
@property
def modeltime(self):
raise NotImplementedError(
"must define modeltime in child class to use this base class"
)
@property
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
def packagelist(self):
return self._packagelist
@packagelist.setter
def packagelist(self, packagelist):
self._packagelist = packagelist
@property
def namefile(self):
return self._namefile
@namefile.setter
def namefile(self, namefile):
self._namefile = namefile
@property
def model_ws(self):
return self._model_ws
@model_ws.setter
def model_ws(self, model_ws):
self._model_ws = model_ws
@property
def exename(self):
return self._exename
@exename.setter
def exename(self, exename):
self._exename = exename
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, verbose):
self._verbose = verbose
@property
def laytyp(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").laytyp.array
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").laycon.array
if self.get_package("UPW") is not None:
return self.get_package("UPW").laytyp.array
return None
@property
def hdry(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").hdry
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").hdry
if self.get_package("UPW") is not None:
return self.get_package("UPW").hdry
return None
@property
def hnoflo(self):
try:
bas6 = self.get_package("BAS6")
return bas6.hnoflo
except AttributeError:
return None
@property
def laycbd(self):
try:
dis = self.get_package("DIS")
return dis.laycbd.array
except AttributeError:
return None
# we don't need these - no need for controlled access to array_free_format
# def set_free_format(self, value=True):
# """
# Set the free format flag for the model instance
#
# Parameters
# ----------
# value : bool
# Boolean value to set free format flag for model. (default is True)
#
# Returns
# -------
#
# """
# if not isinstance(value, bool):
# print('Error: set_free_format passed value must be a boolean')
# return False
# self.array_free_format = value
#
# def get_free_format(self):
# """
# Return the free format flag for the model
#
# Returns
# -------
# out : bool
# Free format flag for the model
#
# """
# return self.array_free_format
def next_unit(self, i=None):
if i is not None:
self.__onunit__ = i - 1
else:
self.__onunit__ += 1
return self.__onunit__
def next_ext_unit(self):
"""
Function to encapsulate next_ext_unit attribute
"""
next_unit = self._next_ext_unit + 1
self._next_ext_unit += 1
return next_unit
def export(self, f, **kwargs):
"""
Method to export a model to netcdf or shapefile based on the
extension of the file name (.shp for shapefile, .nc for netcdf)
Parameters
----------
f : str
filename
kwargs : keyword arguments
modelgrid : flopy.discretization.Grid instance
user supplied modelgrid which can be used for exporting
in lieu of the modelgrid associated with the model object
Returns
-------
None or Netcdf object
"""
from .export import utils
return utils.model_export(f, self, **kwargs)
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
"""
for idx, u in enumerate(p.unit_number):
if u != 0:
if u in self.package_units or u in self.external_units:
try:
pn = p.name[idx]
                    except (IndexError, TypeError):
pn = p.name
if self.verbose:
print(
f"\nWARNING:\n unit {u} of package {pn} already in use."
)
self.package_units.append(u)
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif isinstance(p, type(pp)):
if self.verbose:
print(
"\nWARNING:\n Two packages of the same type, "
f"Replacing existing '{p.name[0]}' package."
)
self.packagelist[i] = p
return
if self.verbose:
print("adding Package: ", p.name[0])
self.packagelist.append(p)
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
"""
for i, pp in enumerate(self.packagelist):
if pname.upper() in pp.name:
if self.verbose:
print("removing Package: ", pp.name)
# Remove the package object from the model's packagelist
p = self.packagelist.pop(i)
# Remove the package unit number from the list of package
# units stored with the model
for iu in p.unit_number:
if iu in self.package_units:
self.package_units.remove(iu)
return
raise StopIteration(
"Package name " + pname + " not found in Package list"
)
def __getattr__(self, item):
"""
__getattr__ - syntactic sugar
Parameters
----------
        item : str
            3 character package name (case insensitive). The special names
            "tr", "nper", and "start_datetime" are delegated to the
            ModflowDis object, if present.
        Returns
        -------
        pp : Package object
            Package object of type :class:`flopy.pakbase.Package`
        Note
        ----
        if self.dis is not None, "tr", "nper", and "start_datetime" are
        taken from the ModflowDis object before being returned
"""
if item == "output_packages" or not hasattr(self, "output_packages"):
raise AttributeError(item)
if item == "tr":
if self.dis is not None:
return self.dis.tr
else:
return None
if item == "nper":
if self.dis is not None:
return self.dis.nper
else:
return 0
if item == "start_datetime":
if self.dis is not None:
return self.dis.start_datetime
else:
return None
# return self.get_package(item)
# to avoid infinite recursion
if item == "_packagelist" or item == "packagelist":
raise AttributeError(item)
pckg = self.get_package(item)
if pckg is not None or item in self.mfnam_packages:
return pckg
if item == "modelgrid":
return
raise AttributeError(item)
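    # A short usage sketch (hypothetical subclass instance `m`) of the
    # attribute-style access implemented above; package names resolve
    # through get_package(), and a few dis-related names are forwarded:
    #
    #     dis = m.dis               # equivalent to m.get_package("DIS")
    #     nper = m.nper             # m.dis.nper, or 0 if there is no dis
    #     start = m.start_datetime  # m.dis.start_datetime, or None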
def get_ext_dict_attr(
self, ext_unit_dict=None, unit=None, filetype=None, pop_key=True
):
iu = None
fname = None
if ext_unit_dict is not None:
for key, value in ext_unit_dict.items():
if key == unit:
iu = key
fname = os.path.basename(value.filename)
break
elif value.filetype == filetype:
iu = key
fname = os.path.basename(value.filename)
if pop_key:
self.add_pop_key_list(iu)
break
return iu, fname
def _output_msg(self, i, add=True):
if add:
txt1 = "Adding"
txt2 = "to"
else:
txt1 = "Removing"
txt2 = "from"
print(
f"{txt1} {self.output_fnames[i]} (unit={self.output_units[i]}) "
f"{txt2} the output list."
)
def add_output_file(
self, unit, fname=None, extension="cbc", binflag=True, package=None
):
"""
Add an ascii or binary output file for a package
Parameters
----------
unit : int
unit number of external array
fname : str
filename of external array. (default is None)
extension : str
extension to use for the cell-by-cell file. Only used if fname
is None. (default is cbc)
binflag : bool
boolean flag indicating if the output file is a binary file.
Default is True
package : str
string that defines the package the output file is attached to.
Default is None
"""
add_cbc = False
if unit > 0:
add_cbc = True
# determine if the file is in external_units
if abs(unit) in self.external_units:
idx = self.external_units.index(abs(unit))
if fname is None:
fname = os.path.basename(self.external_fnames[idx])
binflag = self.external_binflag[idx]
self.remove_external(unit=abs(unit))
# determine if the unit exists in the output data
if abs(unit) in self.output_units:
add_cbc = False
idx = self.output_units.index(abs(unit))
# determine if binflag has changed
if binflag is not self.output_binflag[idx]:
add_cbc = True
if add_cbc:
self.remove_output(unit=abs(unit))
else:
if package is not None:
self.output_packages[idx].append(package)
if add_cbc:
if fname is None:
fname = f"{self.name}.{extension}"
# check if this file name exists for a different unit number
if fname in self.output_fnames:
idx = self.output_fnames.index(fname)
iut = self.output_units[idx]
if iut != unit:
# include unit number in fname if package has
# not been passed
if package is None:
fname = f"{self.name}.{unit}.{extension}"
# include package name in fname
else:
fname = f"{self.name}.{package}.{extension}"
else:
fname = os.path.basename(fname)
self.add_output(fname, unit, binflag=binflag, package=package)
return
def add_output(self, fname, unit, binflag=False, package=None):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.output_fnames:
if self.verbose:
print(
"BaseModel.add_output() warning: "
f"replacing existing filename {fname}"
)
idx = self.output_fnames.index(fname)
if self.verbose:
self._output_msg(idx, add=False)
self.output_fnames.pop(idx)
self.output_units.pop(idx)
self.output_binflag.pop(idx)
self.output_packages.pop(idx)
self.output_fnames.append(fname)
self.output_units.append(unit)
self.output_binflag.append(binflag)
if package is not None:
self.output_packages.append([package])
else:
self.output_packages.append([])
if self.verbose:
self._output_msg(-1, add=True)
return
def remove_output(self, fname=None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
else:
msg = " either fname or unit must be passed to remove_output()"
raise Exception(msg)
return
def get_output(self, fname=None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
return self.output_units[i]
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
return self.output_fnames[i]
return None
else:
msg = " either fname or unit must be passed to get_output()"
raise Exception(msg)
return
def set_output_attribute(self, fname=None, unit=None, attr=None):
"""
Set a variable in an output file from the model by specifying either
the file name or the unit number and a dictionary with attributes
to change.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
msg = (
" either fname or unit must be passed "
"to set_output_attribute()"
)
raise Exception(msg)
if attr is not None:
if idx is not None:
                for key, value in attr.items():
if key == "binflag":
self.output_binflag[idx] = value
elif key == "fname":
self.output_fnames[idx] = value
elif key == "unit":
self.output_units[idx] = value
return
def get_output_attribute(self, fname=None, unit=None, attr=None):
"""
        Get an attribute for an output file from the model by specifying either
the file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
" either fname or unit must be passed "
"to set_output_attribute()"
)
v = None
if attr is not None:
if idx is not None:
if attr == "binflag":
v = self.output_binflag[idx]
elif attr == "fname":
v = self.output_fnames[idx]
elif attr == "unit":
v = self.output_units[idx]
return v
def add_external(self, fname, unit, binflag=False, output=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.external_fnames:
if self.verbose:
print(
"BaseModel.add_external() warning: "
f"replacing existing filename {fname}"
)
idx = self.external_fnames.index(fname)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
if unit in self.external_units:
if self.verbose:
msg = f"BaseModel.add_external() warning: replacing existing unit {unit}"
print(msg)
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
self.external_output.append(output)
return
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
"""
plist = []
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
plist.append(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
plist.append(i)
else:
msg = " either fname or unit must be passed to remove_external()"
raise Exception(msg)
# remove external file
j = 0
for i in plist:
ipos = i - j
self.external_fnames.pop(ipos)
self.external_units.pop(ipos)
self.external_binflag.pop(ipos)
self.external_output.pop(ipos)
j += 1
return
def add_existing_package(
self, filename, ptype=None, copy_to_model_ws=True
):
"""
Add an existing package to a model instance.
Parameters
----------
filename : str
the name of the file to add as a package
ptype : optional
the model package type (e.g. "lpf", "wel", etc). If None,
then the file extension of the filename arg is used
copy_to_model_ws : bool
flag to copy the package file into the model_ws directory.
Returns
-------
None
"""
if ptype is None:
ptype = filename.split(".")[-1]
ptype = str(ptype).upper()
# for pak in self.packagelist:
# if ptype in pak.name:
# print("BaseModel.add_existing_package() warning: " +\
# "replacing existing package {0}".format(ptype))
class Obj:
pass
fake_package = Obj()
fake_package.write_file = lambda: None
fake_package.name = [ptype]
fake_package.extension = [filename.split(".")[-1]]
fake_package.unit_number = [self.next_ext_unit()]
if copy_to_model_ws:
base_filename = os.path.split(filename)[-1]
fake_package.file_name = [base_filename]
shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
else:
fake_package.file_name = [filename]
fake_package.allowDuplicates = True
self.add_package(fake_package)
def get_name_file_entries(self):
"""
Get a string representation of the name file.
Parameters
----------
"""
lines = []
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s = f"{p.name[i]:14s} {p.unit_number[i]:5d} {p.file_name[i]}"
lines.append(s)
return "\n".join(lines) + "\n"
def has_package(self, name):
"""
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
True if package name exists, otherwise False if not found.
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for p in self.packagelist:
for pn in p.name:
if pn.upper() == name:
return True
return False
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for pp in self.packagelist:
if pp.name[0].upper() == name:
return pp
return None
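    # A short usage sketch (hypothetical loaded model `m`); both lookups
    # are case-insensitive:
    #
    #     if m.has_package("LPF"):
    #         lpf = m.get_package("lpf")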
def set_version(self, version):
self.version = version.lower()
# check that this is a valid model version
if self.version not in list(self.version_types.keys()):
err = (
f"Error: Unsupported model version ({self.version}). "
"Valid model versions are:"
)
for v in list(self.version_types.keys()):
err += f" {v}"
raise Exception(err)
# set namefile heading
self.heading = (
f"# Name file for {self.version_types[self.version]}, "
f"generated by Flopy version {__version__}."
)
# set heading for each package
for p in self.get_package_list():
pak = self.get_package(p)
if hasattr(pak, "heading"):
pak._generate_heading()
return None
def change_model_ws(self, new_pth=None, reset_external=False):
"""
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
the present working directory).
        reset_external : bool
            If True, reset the path for each external file to be
            relative to the new workspace (default is False).
        Returns
        -------
        None
"""
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
print(f"\ncreating model workspace...\n {new_pth}")
os.makedirs(new_pth)
            except OSError:
                raise OSError(f"{new_pth} is not a valid workspace folder")
# line = '\n{} not valid, workspace-folder '.format(new_pth) + \
# 'was changed to {}\n'.format(os.getcwd())
# print(line)
# new_pth = os.getcwd()
# --reset the model workspace
old_pth = self._model_ws
self._model_ws = new_pth
if self.verbose:
print(f"\nchanging model workspace...\n {new_pth}")
# reset the paths for each package
for pp in self.packagelist:
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
# create the external path (if needed)
if (
hasattr(self, "external_path")
and self.external_path is not None
and not os.path.exists(
os.path.join(self._model_ws, self.external_path)
)
):
pth = os.path.join(self._model_ws, self.external_path)
os.makedirs(pth)
if reset_external:
self._reset_external(pth, old_pth)
elif reset_external:
self._reset_external(self._model_ws, old_pth)
return None
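    # A hedged usage sketch ("sims/run01" is an illustrative path): move
    # the model files to a new workspace and remap relative external-file
    # paths against the old workspace.
    # >>> ml.change_model_ws("sims/run01", reset_external=True)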
def _reset_external(self, pth, old_pth):
new_ext_fnames = []
for ext_file, output in zip(
self.external_fnames, self.external_output
):
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
# this is a wicked mess
if output:
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
new_ext_file = ext_file
else:
# fpth = os.path.abspath(os.path.join(old_pth, ext_file))
# new_ext_file = os.path.relpath(fpth, os.path.abspath(pth))
fdir = os.path.dirname(ext_file)
if fdir == "":
fpth = os.path.abspath(os.path.join(old_pth, ext_file))
else:
fpth = ext_file
ao = os.path.abspath(os.path.dirname(fpth))
ep = os.path.abspath(pth)
relp = os.path.relpath(ao, ep)
new_ext_file = os.path.join(relp, os.path.basename(ext_file))
new_ext_fnames.append(new_ext_file)
self.external_fnames = new_ext_fnames
@property
def model_ws(self):
return copy.deepcopy(self._model_ws)
def _set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = str(value)
self.namefile = self.__name + "." + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + "." + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
def __setattr__(self, key, value):
if key == "free_format_input":
# if self.bas6 is not None:
# self.bas6.ifrefm = value
super().__setattr__(key, value)
elif key == "name":
self._set_name(value)
elif key == "model_ws":
self.change_model_ws(value)
elif key == "sr" and value.__class__.__name__ == "SpatialReference":
warnings.warn(
"SpatialReference has been deprecated.",
category=DeprecationWarning,
)
if self.dis is not None:
self.dis.sr = value
else:
raise Exception(
"cannot set SpatialReference - ModflowDis not found"
)
elif key == "tr":
assert isinstance(
value, discretization.reference.TemporalReference
)
if self.dis is not None:
self.dis.tr = value
else:
raise Exception(
"cannot set TemporalReference - ModflowDis not found"
)
elif key == "start_datetime":
if self.dis is not None:
self.dis.start_datetime = value
self.tr.start_datetime = value
else:
raise Exception(
"cannot set start_datetime - ModflowDis not found"
)
else:
super().__setattr__(key, value)
def run_model(
self,
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
):
"""
This method will run the model using subprocess.Popen.
Parameters
----------
        silent : boolean
            Suppress echoing run information to the screen (default is
            False).
        pause : boolean, optional
            Pause upon completion (default is False).
        report : boolean, optional
            Save stdout lines to a list (buff) which is returned by the
            method. (default is False)
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
return run_model(
self.exe_name,
self.namefile,
model_ws=self.model_ws,
silent=silent,
pause=pause,
report=report,
normal_msg=normal_msg,
)
def load_results(self):
print("load_results not implemented")
return None
def write_input(self, SelPackList=False, check=False):
"""
Write the input.
Parameters
----------
        SelPackList : False or list of packages
            List of package names to write. If False (default), all
            packages are written.
        check : bool
            Check model input for common errors prior to writing
            (default is False).
"""
if check:
# run check prior to writing input
self.check(f=f"{self.name}.chk", verbose=self.verbose, level=1)
# reset the model to free_format if parameter substitution was
# performed on a model load
if self.parameter_load and not self.free_format_input:
if self.verbose:
print(
"\nResetting free_format_input to True to "
"preserve the precision of the parameter data."
)
self.free_format_input = True
if self.verbose:
print("\nWriting packages:")
        if SelPackList is False:
for p in self.packagelist:
if self.verbose:
print(" Package: ", p.name[0])
# prevent individual package checks from running after
# model-level package check above
# otherwise checks are run twice
# or the model level check procedure would have to be split up
# or each package would need a check argument,
# or default for package level check would have to be False
try:
p.write_file(check=False)
except TypeError:
p.write_file()
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(" Package: ", p.name[0])
try:
p.write_file(check=False)
except TypeError:
p.write_file()
break
if self.verbose:
print(" ")
# write name file
self.write_name_file()
# os.chdir(org_dir)
return
def write_name_file(self):
"""
        Every model needs its own write_name_file method.
        """
        raise Exception(
            "IMPLEMENTATION ERROR: write_name_file must be overloaded"
)
def set_model_units(self):
"""
Every model needs its own set_model_units method
"""
raise Exception(
"IMPLEMENTATION ERROR: set_model_units must be overloaded"
)
@property
def name(self):
"""
Get model name
Returns
-------
name : str
name of model
"""
return copy.deepcopy(self.__name)
def add_pop_key_list(self, key):
"""
        Add an external file unit number to a list that will be used to
        remove model output (typically binary) files from ext_unit_dict.
        Parameters
        ----------
        key : int
            file unit number
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
chk = utils.check(self, f=f, verbose=verbose, level=level)
# check for unit number conflicts
package_units = {}
duplicate_units = {}
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] != 0:
if p.unit_number[i] in package_units.values():
duplicate_units[p.name[i]] = p.unit_number[i]
otherpackage = [
k
for k, v in package_units.items()
if v == p.unit_number[i]
][0]
duplicate_units[otherpackage] = p.unit_number[i]
if len(duplicate_units) > 0:
for k, v in duplicate_units.items():
chk._add_to_summary(
"Error", package=k, value=v, desc="unit number conflict"
)
else:
chk.passed.append("Unit number conflicts")
return self._check(chk, level)
def plot(self, SelPackList=None, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
model input data
Parameters
----------
        SelPackList : None or list
            List of packages to plot. If SelPackList=None all packages
            are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
                MODFLOW zero-based layer number to return. If None,
                then all layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return.
(default is zero)
key : str
MfList dictionary key. (default is None)
        Returns
        -------
        axes : list
            Empty list is returned if filename_base is not None. Otherwise
            a list of matplotlib.pyplot.Axes is returned.
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_model_helper(
self, SelPackList=SelPackList, **kwargs
)
return axes
def to_shapefile(self, filename, package_names=None, **kwargs):
"""
Wrapper function for writing a shapefile for the model grid. If
package_names is not None, then search through the requested packages
looking for arrays that can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
        >>> m.to_shapefile('model.shp')
"""
        warnings.warn(
            "to_shapefile() is deprecated. use .export()",
            category=DeprecationWarning,
        )
self.export(filename, package_names=package_names)
return
def run_model(
exe_name,
namefile,
model_ws="./",
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
use_async=False,
cargs=None,
):
"""
This function will run the model using subprocess.Popen. It
communicates with the model's stdout asynchronously and reports
progress to the screen with timestamps
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
Namefile of model to run. The namefile must be the
filename of the namefile without the path. Namefile can be None
to allow programs that do not require a control file (name file)
to be passed as a command line argument.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
    silent : boolean
        Suppress echoing run information to the screen (default is
        False).
    pause : boolean, optional
        Pause upon completion (default is False).
    report : boolean, optional
        Save stdout lines to a list (buff) which is returned by the
        method. (default is False)
normal_msg : str or list
Normal termination message used to determine if the
run terminated normally. More than one message can be provided using
a list. (Default is 'normal termination')
    use_async : boolean
        Asynchronously read model stdout and report with timestamps;
        useful for long-running models, but not recommended for models
        that finish quickly.
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
success = False
buff = []
# convert normal_msg to a list of lower case str for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg]
for idx, s in enumerate(normal_msg):
normal_msg[idx] = s.lower()
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
import platform
if platform.system() in "Windows":
if not exe_name.lower().endswith(".exe"):
exe = which(exe_name + ".exe")
elif exe_name.lower().endswith(".exe"):
exe = which(exe_name[:-4])
if exe is None:
raise Exception(
f"The program {exe_name} does not exist or is not executable."
)
else:
if not silent:
print(
f"FloPy is using the following executable to run the model: {exe}"
)
if namefile is not None:
if not os.path.isfile(os.path.join(model_ws, namefile)):
raise Exception(
f"The namefile for this model does not exists: {namefile}"
)
# simple little function for the thread to target
def q_output(output, q):
for line in iter(output.readline, b""):
q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe_name]
if namefile is not None:
argv.append(namefile)
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
if not use_async:
while True:
line = proc.stdout.readline().decode("utf-8")
if line == "" and proc.poll() is not None:
break
if line:
for msg in normal_msg:
if msg in line.lower():
success = True
break
line = line.rstrip("\r\n")
if not silent:
print(line)
if report:
buff.append(line)
else:
break
return success, buff
# some tricks for the async stdout reading
q = Queue.Queue()
thread = threading.Thread(target=q_output, args=(proc.stdout, q))
thread.daemon = True
thread.start()
failed_words = ["fail", "error"]
last = datetime.now()
lastsec = 0.0
while True:
try:
line = q.get_nowait()
except Queue.Empty:
pass
else:
            if line == b"":
break
line = line.decode().lower().strip()
if line != "":
now = datetime.now()
dt = now - last
tsecs = dt.total_seconds() - lastsec
line = f"(elapsed:{tsecs})-->{line}"
lastsec = tsecs + lastsec
buff.append(line)
if not silent:
print(line)
for fword in failed_words:
if fword in line:
success = False
break
if proc.poll() is not None:
break
proc.wait()
thread.join(timeout=1)
buff.extend(proc.stdout.readlines())
proc.stdout.close()
for line in buff:
for msg in normal_msg:
if msg in line.lower():
print("success")
success = True
break
if pause:
input("Press Enter to continue...")
return success, buff
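# A hedged usage sketch of the module-level runner (executable and
# namefile names are illustrative; both must exist on disk):
# >>> success, buff = run_model("mf2005", "model.nam", model_ws="sims",
# ...                           silent=True, report=True)
# >>> if not success:
# ...     print("\n".join(buff[-20:]))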
|
add_existing_package
|
Add an existing package to a model instance.
Parameters
----------
filename : str
the name of the file to add as a package
ptype : str, optional
    the model package type (e.g. "lpf", "wel"). If None,
    then the file extension of the filename arg is used
copy_to_model_ws : bool
    flag to copy the package file into the model_ws directory
    (default is True).
Returns
-------
None
|
"""
mbase module
This module contains the base model class from which
all other models inherit.
"""
import abc
import os
import shutil
import threading
import warnings
import queue as Queue
from datetime import datetime
from shutil import which
from subprocess import Popen, PIPE, STDOUT
import copy
import numpy as np
from flopy import utils, discretization
from .version import __version__
from .discretization.grid import Grid
## Global variables
# Multiplier for individual array elements in integer and real arrays read by
# MODFLOW's U2DREL, U1DREL and U2DINT.
iconst = 1
# Printout flag. If >= 0 then array values read are printed in listing file.
iprn = -1
# external exceptions for users
class PackageLoadException(Exception):
"""
FloPy package load exception.
"""
def __init__(self, error, location=""):
"""Initialize exception."""
self.message = error
super().__init__(f"{error} ({location})")
class FileDataEntry:
def __init__(self, fname, unit, binflag=False, output=False, package=None):
self.fname = fname
self.unit = unit
self.binflag = binflag
self.output = output
self.package = package
class FileData:
def __init__(self):
self.file_data = []
return
def add_file(self, fname, unit, binflag=False, output=False, package=None):
        ipop = []
        for idx, file_data in enumerate(self.file_data):
            if file_data.fname == fname or file_data.unit == unit:
                ipop.append(idx)
        # drop existing entries that share the same fname or unit so each
        # file and unit appears at most once
        for idx in reversed(ipop):
            self.file_data.pop(idx)
        self.file_data.append(
            FileDataEntry(
                fname, unit, binflag=binflag, output=output, package=package
            )
        )
        return
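# A hedged behavioral sketch (file names and units are illustrative):
# with the de-duplication above, re-adding a matching fname or unit
# replaces the earlier entry instead of accumulating duplicates.
# >>> fd = FileData()
# >>> fd.add_file("heads.hds", 51, binflag=True, output=True)
# >>> fd.add_file("heads.hds", 52)  # replaces the unit-51 entry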
class ModelInterface:
def __init__(self):
self._mg_resync = True
self._modelgrid = None
def update_modelgrid(self):
if self._modelgrid is not None:
self._modelgrid = Grid(
proj4=self._modelgrid.proj4,
xoff=self._modelgrid.xoffset,
yoff=self._modelgrid.yoffset,
angrot=self._modelgrid.angrot,
)
self._mg_resync = True
@property
@abc.abstractmethod
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
@abc.abstractmethod
def packagelist(self):
raise NotImplementedError(
"must define packagelist in child class to use this base class"
)
@property
@abc.abstractmethod
def namefile(self):
raise NotImplementedError(
"must define namefile in child class to use this base class"
)
@property
@abc.abstractmethod
def model_ws(self):
raise NotImplementedError(
"must define model_ws in child class to use this base class"
)
@property
@abc.abstractmethod
def exename(self):
raise NotImplementedError(
"must define exename in child class to use this base class"
)
@property
@abc.abstractmethod
def version(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@property
@abc.abstractmethod
def solver_tols(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@abc.abstractmethod
def export(self, f, **kwargs):
raise NotImplementedError(
"must define export in child class to use this base class"
)
@property
@abc.abstractmethod
def laytyp(self):
raise NotImplementedError(
"must define laytyp in child class to use this base class"
)
@property
@abc.abstractmethod
def hdry(self):
raise NotImplementedError(
"must define hdry in child class to use this base class"
)
@property
@abc.abstractmethod
def hnoflo(self):
raise NotImplementedError(
"must define hnoflo in child class to use this base class"
)
@property
@abc.abstractmethod
def laycbd(self):
raise NotImplementedError(
"must define laycbd in child class to use this base class"
)
@property
@abc.abstractmethod
def verbose(self):
raise NotImplementedError(
"must define verbose in child class to use this base class"
)
@abc.abstractmethod
def check(self, f=None, verbose=True, level=1):
raise NotImplementedError(
"must define check in child class to use this base class"
)
def get_package_list(self, ftype=None):
"""
Get a list of all the package names.
Parameters
----------
ftype : str
Type of package, 'RIV', 'LPF', etc.
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
"""
val = []
for pp in self.packagelist:
if ftype is None:
val.append(pp.name[0].upper())
elif pp.package_type.lower() == ftype:
val.append(pp.name[0].upper())
return val
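    # A hedged usage sketch (package names are illustrative):
    # >>> ml.get_package_list()
    # ['DIS', 'BAS6', 'LPF', 'WEL', 'PCG']
    # >>> ml.get_package_list(ftype="wel")
    # ['WEL']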
def _check(self, chk, level=1):
"""
Check model data for common errors.
        Parameters
        ----------
        chk : check object
            Model-level check instance to which package-level results
            are appended.
        level : int
            Check method analysis level. If level=0, summary checks are
            performed. If level=1, full checks are performed.
        Returns
        -------
        chk : check object
            The updated check instance.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
results = {}
for p in self.packagelist:
if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
results[p.name[0]] = p.check(
f=None,
verbose=False,
level=level - 1,
checktype=chk.__class__,
)
# model level checks
# solver check
if self.version in chk.solver_packages.keys():
solvers = set(chk.solver_packages[self.version]).intersection(
set(self.get_package_list())
)
if not solvers:
chk._add_to_summary(
"Error", desc="\r No solver package", package="model"
)
elif len(list(solvers)) > 1:
for s in solvers:
chk._add_to_summary(
"Error",
desc="\r Multiple solver packages",
package=s,
)
else:
chk.passed.append("Compatible solver package")
# add package check results to model level check summary
for r in results.values():
if (
r is not None and r.summary_array is not None
): # currently SFR doesn't have one
chk.summary_array = np.append(
chk.summary_array, r.summary_array
).view(np.recarray)
chk.passed += [
f"{r.package.name[0]} package: {psd}" for psd in r.passed
]
chk.summarize()
return chk
class BaseModel(ModelInterface):
"""
MODFLOW-based models base class.
Parameters
----------
modelname : str, default "modflowtest"
Name of the model, which is also used for model file names.
namefile_ext : str, default "nam"
Name file extension, without "."
exe_name : str, default "mf2k.exe"
Name of the modflow executable.
model_ws : str, optional
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
structured : bool, default True
Specify if model grid is structured (default) or unstructured.
verbose : bool, default False
Print additional information to the screen.
**kwargs : dict, optional
Used to define: ``xll``/``yll`` for the x- and y-coordinates of
the lower-left corner of the grid, ``xul``/``yul`` for the
x- and y-coordinates of the upper-left corner of the grid
(deprecated), ``rotation`` for the grid rotation (default 0.0),
``proj4_str`` for a PROJ string, and ``start_datetime`` for
model start date (default "1-1-1970").
"""
def __init__(
self,
modelname="modflowtest",
namefile_ext="nam",
exe_name="mf2k.exe",
model_ws=None,
structured=True,
verbose=False,
**kwargs,
):
"""Initialize BaseModel."""
super().__init__()
self.__name = modelname
self.namefile_ext = namefile_ext or ""
self._namefile = self.__name + "." + self.namefile_ext
self._packagelist = []
self.heading = ""
self.exe_name = exe_name
self._verbose = verbose
self.external_path = None
self.external_extension = "ref"
if model_ws is None:
model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
            except OSError:
print(
f"\n{model_ws} not valid, "
f"workspace-folder was changed to {os.getcwd()}\n"
)
model_ws = os.getcwd()
self._model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ""
# check for reference info in kwargs
# we are just carrying these until a dis package is added
xll = kwargs.pop("xll", None)
yll = kwargs.pop("yll", None)
self._xul = kwargs.pop("xul", None)
self._yul = kwargs.pop("yul", None)
self._rotation = kwargs.pop("rotation", 0.0)
self._proj4_str = kwargs.pop("proj4_str", None)
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
# build model discretization objects
self._modelgrid = Grid(
proj4=self._proj4_str,
xoff=xll,
yoff=yll,
angrot=self._rotation,
)
self._modeltime = None
# Model file information
self.__onunit__ = 10
# external option stuff
self.array_free_format = True
self.free_format_input = True
self.parameter_load = False
self.array_format = None
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external_output = []
self.package_units = []
self._next_ext_unit = None
# output files
self.output_fnames = []
self.output_units = []
self.output_binflag = []
self.output_packages = []
return
@property
def modeltime(self):
raise NotImplementedError(
"must define modeltime in child class to use this base class"
)
@property
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
def packagelist(self):
return self._packagelist
@packagelist.setter
def packagelist(self, packagelist):
self._packagelist = packagelist
@property
def namefile(self):
return self._namefile
@namefile.setter
def namefile(self, namefile):
self._namefile = namefile
@property
def model_ws(self):
return self._model_ws
@model_ws.setter
def model_ws(self, model_ws):
self._model_ws = model_ws
@property
def exename(self):
return self._exename
@exename.setter
def exename(self, exename):
self._exename = exename
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, verbose):
self._verbose = verbose
@property
def laytyp(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").laytyp.array
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").laycon.array
if self.get_package("UPW") is not None:
return self.get_package("UPW").laytyp.array
return None
@property
def hdry(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").hdry
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").hdry
if self.get_package("UPW") is not None:
return self.get_package("UPW").hdry
return None
@property
def hnoflo(self):
try:
bas6 = self.get_package("BAS6")
return bas6.hnoflo
except AttributeError:
return None
@property
def laycbd(self):
try:
dis = self.get_package("DIS")
return dis.laycbd.array
except AttributeError:
return None
# we don't need these - no need for controlled access to array_free_format
# def set_free_format(self, value=True):
# """
# Set the free format flag for the model instance
#
# Parameters
# ----------
# value : bool
# Boolean value to set free format flag for model. (default is True)
#
# Returns
# -------
#
# """
# if not isinstance(value, bool):
# print('Error: set_free_format passed value must be a boolean')
# return False
# self.array_free_format = value
#
# def get_free_format(self):
# """
# Return the free format flag for the model
#
# Returns
# -------
# out : bool
# Free format flag for the model
#
# """
# return self.array_free_format
def next_unit(self, i=None):
if i is not None:
self.__onunit__ = i - 1
else:
self.__onunit__ += 1
return self.__onunit__
def next_ext_unit(self):
"""
        Return the next available external file unit number and
        increment the internal counter.
"""
next_unit = self._next_ext_unit + 1
self._next_ext_unit += 1
return next_unit
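    # A hedged sketch of sequential unit allocation, assuming a subclass
    # has initialized _next_ext_unit (e.g. to 1000):
    # >>> ml.next_ext_unit()
    # 1001
    # >>> ml.next_ext_unit()
    # 1002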
def export(self, f, **kwargs):
"""
Method to export a model to netcdf or shapefile based on the
extension of the file name (.shp for shapefile, .nc for netcdf)
Parameters
----------
f : str
filename
kwargs : keyword arguments
modelgrid : flopy.discretization.Grid instance
user supplied modelgrid which can be used for exporting
in lieu of the modelgrid associated with the model object
Returns
-------
None or Netcdf object
"""
from .export import utils
return utils.model_export(f, self, **kwargs)
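    # A hedged usage sketch: the file extension selects the exporter
    # (file names are illustrative; netCDF export may require extra
    # packages such as netCDF4).
    # >>> ml.export("model.shp")
    # >>> ml.export("model.nc")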
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
"""
for idx, u in enumerate(p.unit_number):
if u != 0:
if u in self.package_units or u in self.external_units:
try:
pn = p.name[idx]
                    except Exception:
pn = p.name
if self.verbose:
print(
f"\nWARNING:\n unit {u} of package {pn} already in use."
)
self.package_units.append(u)
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif isinstance(p, type(pp)):
if self.verbose:
print(
"\nWARNING:\n Two packages of the same type, "
f"Replacing existing '{p.name[0]}' package."
)
self.packagelist[i] = p
return
if self.verbose:
print("adding Package: ", p.name[0])
self.packagelist.append(p)
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
"""
for i, pp in enumerate(self.packagelist):
if pname.upper() in pp.name:
if self.verbose:
print("removing Package: ", pp.name)
# Remove the package object from the model's packagelist
p = self.packagelist.pop(i)
# Remove the package unit number from the list of package
# units stored with the model
for iu in p.unit_number:
if iu in self.package_units:
self.package_units.remove(iu)
return
raise StopIteration(
"Package name " + pname + " not found in Package list"
)
def __getattr__(self, item):
"""
__getattr__ - syntactic sugar
Parameters
----------
item : str
3 character package name (case insensitive) or "sr" to access
the SpatialReference instance of the ModflowDis object
Returns
-------
sr : SpatialReference instance
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
Note
----
if self.dis is not None, then the spatial reference instance is updated
using self.dis.delr, self.dis.delc, and self.dis.lenuni before being
returned
"""
if item == "output_packages" or not hasattr(self, "output_packages"):
raise AttributeError(item)
if item == "tr":
if self.dis is not None:
return self.dis.tr
else:
return None
if item == "nper":
if self.dis is not None:
return self.dis.nper
else:
return 0
if item == "start_datetime":
if self.dis is not None:
return self.dis.start_datetime
else:
return None
# return self.get_package(item)
# to avoid infinite recursion
if item == "_packagelist" or item == "packagelist":
raise AttributeError(item)
pckg = self.get_package(item)
if pckg is not None or item in self.mfnam_packages:
return pckg
if item == "modelgrid":
return
raise AttributeError(item)
def get_ext_dict_attr(
self, ext_unit_dict=None, unit=None, filetype=None, pop_key=True
):
iu = None
fname = None
if ext_unit_dict is not None:
for key, value in ext_unit_dict.items():
if key == unit:
iu = key
fname = os.path.basename(value.filename)
break
elif value.filetype == filetype:
iu = key
fname = os.path.basename(value.filename)
if pop_key:
self.add_pop_key_list(iu)
break
return iu, fname
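    # A hedged usage sketch (ext_unit_dict as built during a model load;
    # the unit number and filetype string are illustrative): resolve a
    # unit/file pair by unit number or by file type.
    # >>> iu, fname = ml.get_ext_dict_attr(ext_unit_dict, unit=53)
    # >>> iu, fname = ml.get_ext_dict_attr(ext_unit_dict, filetype="DATA")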
def _output_msg(self, i, add=True):
if add:
txt1 = "Adding"
txt2 = "to"
else:
txt1 = "Removing"
txt2 = "from"
print(
f"{txt1} {self.output_fnames[i]} (unit={self.output_units[i]}) "
f"{txt2} the output list."
)
def add_output_file(
self, unit, fname=None, extension="cbc", binflag=True, package=None
):
"""
Add an ascii or binary output file for a package
Parameters
----------
unit : int
unit number of external array
fname : str
filename of external array. (default is None)
extension : str
extension to use for the cell-by-cell file. Only used if fname
is None. (default is cbc)
binflag : bool
boolean flag indicating if the output file is a binary file.
Default is True
package : str
string that defines the package the output file is attached to.
Default is None
"""
add_cbc = False
if unit > 0:
add_cbc = True
# determine if the file is in external_units
if abs(unit) in self.external_units:
idx = self.external_units.index(abs(unit))
if fname is None:
fname = os.path.basename(self.external_fnames[idx])
binflag = self.external_binflag[idx]
self.remove_external(unit=abs(unit))
# determine if the unit exists in the output data
if abs(unit) in self.output_units:
add_cbc = False
idx = self.output_units.index(abs(unit))
# determine if binflag has changed
if binflag is not self.output_binflag[idx]:
add_cbc = True
if add_cbc:
self.remove_output(unit=abs(unit))
else:
if package is not None:
self.output_packages[idx].append(package)
if add_cbc:
if fname is None:
fname = f"{self.name}.{extension}"
# check if this file name exists for a different unit number
if fname in self.output_fnames:
idx = self.output_fnames.index(fname)
iut = self.output_units[idx]
if iut != unit:
# include unit number in fname if package has
# not been passed
if package is None:
fname = f"{self.name}.{unit}.{extension}"
# include package name in fname
else:
fname = f"{self.name}.{package}.{extension}"
else:
fname = os.path.basename(fname)
self.add_output(fname, unit, binflag=binflag, package=package)
return
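    # A hedged usage sketch (unit number and package name illustrative):
    # register a binary cell-by-cell budget file for a WEL package.
    # >>> ml.add_output_file(53, extension="cbc", binflag=True, package="WEL")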
def add_output(self, fname, unit, binflag=False, package=None):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
        binflag : boolean
            binary or not. (default is False)
        package : str
            name of the package to attach to this output file.
            (default is None)
"""
if fname in self.output_fnames:
if self.verbose:
print(
"BaseModel.add_output() warning: "
f"replacing existing filename {fname}"
)
idx = self.output_fnames.index(fname)
if self.verbose:
self._output_msg(idx, add=False)
self.output_fnames.pop(idx)
self.output_units.pop(idx)
self.output_binflag.pop(idx)
self.output_packages.pop(idx)
self.output_fnames.append(fname)
self.output_units.append(unit)
self.output_binflag.append(binflag)
if package is not None:
self.output_packages.append([package])
else:
self.output_packages.append([])
if self.verbose:
self._output_msg(-1, add=True)
return
def remove_output(self, fname=None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
        if fname is not None:
            plist = [i for i, e in enumerate(self.output_fnames) if fname in e]
        elif unit is not None:
            plist = [i for i, u in enumerate(self.output_units) if u == unit]
        else:
            msg = " either fname or unit must be passed to remove_output()"
            raise Exception(msg)
        # pop matching entries in reverse order so earlier indices remain
        # valid while the lists shrink
        for i in reversed(plist):
            if self.verbose:
                self._output_msg(i, add=False)
            self.output_fnames.pop(i)
            self.output_units.pop(i)
            self.output_binflag.pop(i)
            self.output_packages.pop(i)
        return
def get_output(self, fname=None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
return self.output_units[i]
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
return self.output_fnames[i]
return None
else:
msg = " either fname or unit must be passed to get_output()"
raise Exception(msg)
return
def set_output_attribute(self, fname=None, unit=None, attr=None):
"""
Set a variable in an output file from the model by specifying either
the file name or the unit number and a dictionary with attributes
to change.
Parameters
----------
fname : str
filename of output array
        unit : int
            unit number of output array
        attr : dict
            dictionary of attribute name/value pairs to set; supported
            keys are "binflag", "fname", and "unit"
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
msg = (
" either fname or unit must be passed "
"to set_output_attribute()"
)
raise Exception(msg)
if attr is not None:
if idx is not None:
                for key, value in attr.items():
if key == "binflag":
self.output_binflag[idx] = value
elif key == "fname":
self.output_fnames[idx] = value
elif key == "unit":
self.output_units[idx] = value
return
def get_output_attribute(self, fname=None, unit=None, attr=None):
"""
        Get an attribute for an output file from the model by specifying either
the file name or the unit number.
Parameters
----------
fname : str
filename of output array
        unit : int
            unit number of output array
        attr : str
            name of the attribute to return; one of "binflag",
            "fname", or "unit"
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
" either fname or unit must be passed "
"to set_output_attribute()"
)
v = None
if attr is not None:
if idx is not None:
if attr == "binflag":
v = self.output_binflag[idx]
elif attr == "fname":
v = self.output_fnames[idx]
elif attr == "unit":
v = self.output_units[idx]
return v
def add_external(self, fname, unit, binflag=False, output=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
        binflag : boolean
            binary or not. (default is False)
        output : boolean
            flag indicating whether the file is a model output file.
            (default is False)
"""
if fname in self.external_fnames:
if self.verbose:
print(
"BaseModel.add_external() warning: "
f"replacing existing filename {fname}"
)
idx = self.external_fnames.index(fname)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
if unit in self.external_units:
if self.verbose:
msg = f"BaseModel.add_external() warning: replacing existing unit {unit}"
print(msg)
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
self.external_output.append(output)
return
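    # A hedged usage sketch (file names and units are illustrative):
    # expose existing array files as DATA / DATA(BINARY) name-file entries.
    # >>> ml.add_external("bot.ref", 201)
    # >>> ml.add_external("heads.bin", 202, binflag=True, output=True)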
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
"""
plist = []
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
plist.append(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
plist.append(i)
else:
msg = " either fname or unit must be passed to remove_external()"
raise Exception(msg)
# remove external file
j = 0
for i in plist:
ipos = i - j
self.external_fnames.pop(ipos)
self.external_units.pop(ipos)
self.external_binflag.pop(ipos)
self.external_output.pop(ipos)
j += 1
return
# MASKED: add_existing_package function (lines 1048-1093)
def get_name_file_entries(self):
"""
Get a string representation of the name file.
"""
lines = []
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s = f"{p.name[i]:14s} {p.unit_number[i]:5d} {p.file_name[i]}"
lines.append(s)
return "\n".join(lines) + "\n"
def has_package(self, name):
"""
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
True if package name exists, otherwise False if not found.
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for p in self.packagelist:
for pn in p.name:
if pn.upper() == name:
return True
return False
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for pp in self.packagelist:
if pp.name[0].upper() == name:
return pp
return None
def set_version(self, version):
self.version = version.lower()
# check that this is a valid model version
if self.version not in list(self.version_types.keys()):
err = (
f"Error: Unsupported model version ({self.version}). "
"Valid model versions are:"
)
for v in list(self.version_types.keys()):
err += f" {v}"
raise Exception(err)
# set namefile heading
self.heading = (
f"# Name file for {self.version_types[self.version]}, "
f"generated by Flopy version {__version__}."
)
# set heading for each package
for p in self.get_package_list():
pak = self.get_package(p)
if hasattr(pak, "heading"):
pak._generate_heading()
return None
def change_model_ws(self, new_pth=None, reset_external=False):
"""
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
the present working directory).
        reset_external : bool
            If True, reset the path for each external file to be
            relative to the new workspace (default is False).
        Returns
        -------
        None
"""
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
print(f"\ncreating model workspace...\n {new_pth}")
os.makedirs(new_pth)
            except OSError:
                raise OSError(f"{new_pth} is not a valid workspace folder")
# line = '\n{} not valid, workspace-folder '.format(new_pth) + \
# 'was changed to {}\n'.format(os.getcwd())
# print(line)
# new_pth = os.getcwd()
# --reset the model workspace
old_pth = self._model_ws
self._model_ws = new_pth
if self.verbose:
print(f"\nchanging model workspace...\n {new_pth}")
# reset the paths for each package
for pp in self.packagelist:
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
# create the external path (if needed)
if (
hasattr(self, "external_path")
and self.external_path is not None
and not os.path.exists(
os.path.join(self._model_ws, self.external_path)
)
):
pth = os.path.join(self._model_ws, self.external_path)
os.makedirs(pth)
if reset_external:
self._reset_external(pth, old_pth)
elif reset_external:
self._reset_external(self._model_ws, old_pth)
return None
def _reset_external(self, pth, old_pth):
new_ext_fnames = []
for ext_file, output in zip(
self.external_fnames, self.external_output
):
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
# this is a wicked mess
if output:
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
new_ext_file = ext_file
else:
# fpth = os.path.abspath(os.path.join(old_pth, ext_file))
# new_ext_file = os.path.relpath(fpth, os.path.abspath(pth))
fdir = os.path.dirname(ext_file)
if fdir == "":
fpth = os.path.abspath(os.path.join(old_pth, ext_file))
else:
fpth = ext_file
ao = os.path.abspath(os.path.dirname(fpth))
ep = os.path.abspath(pth)
relp = os.path.relpath(ao, ep)
new_ext_file = os.path.join(relp, os.path.basename(ext_file))
new_ext_fnames.append(new_ext_file)
self.external_fnames = new_ext_fnames
@property
def model_ws(self):
return copy.deepcopy(self._model_ws)
def _set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = str(value)
self.namefile = self.__name + "." + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + "." + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
def __setattr__(self, key, value):
if key == "free_format_input":
# if self.bas6 is not None:
# self.bas6.ifrefm = value
super().__setattr__(key, value)
elif key == "name":
self._set_name(value)
elif key == "model_ws":
self.change_model_ws(value)
elif key == "sr" and value.__class__.__name__ == "SpatialReference":
warnings.warn(
"SpatialReference has been deprecated.",
category=DeprecationWarning,
)
if self.dis is not None:
self.dis.sr = value
else:
raise Exception(
"cannot set SpatialReference - ModflowDis not found"
)
elif key == "tr":
assert isinstance(
value, discretization.reference.TemporalReference
)
if self.dis is not None:
self.dis.tr = value
else:
raise Exception(
"cannot set TemporalReference - ModflowDis not found"
)
elif key == "start_datetime":
if self.dis is not None:
self.dis.start_datetime = value
self.tr.start_datetime = value
else:
raise Exception(
"cannot set start_datetime - ModflowDis not found"
)
else:
super().__setattr__(key, value)
def run_model(
self,
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
):
"""
This method will run the model using subprocess.Popen.
Parameters
----------
        silent : boolean
            Suppress echoing run information to the screen (default is
            False).
        pause : boolean, optional
            Pause upon completion (default is False).
        report : boolean, optional
            Save stdout lines to a list (buff) which is returned by the
            method. (default is False)
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
return run_model(
self.exe_name,
self.namefile,
model_ws=self.model_ws,
silent=silent,
pause=pause,
report=report,
normal_msg=normal_msg,
)
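    # A hedged usage sketch: run the attached executable on this model's
    # namefile and inspect the captured stdout on failure.
    # >>> success, buff = ml.run_model(silent=True, report=True)
    # >>> if not success:
    # ...     print(buff[-1])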
def load_results(self):
print("load_results not implemented")
return None
def write_input(self, SelPackList=False, check=False):
"""
Write the input.
Parameters
----------
        SelPackList : False or list of packages
            List of package names to write. If False (default), all
            packages are written.
        check : bool
            Check model input for common errors prior to writing
            (default is False).
"""
if check:
# run check prior to writing input
self.check(f=f"{self.name}.chk", verbose=self.verbose, level=1)
# reset the model to free_format if parameter substitution was
# performed on a model load
if self.parameter_load and not self.free_format_input:
if self.verbose:
print(
"\nResetting free_format_input to True to "
"preserve the precision of the parameter data."
)
self.free_format_input = True
if self.verbose:
print("\nWriting packages:")
        if SelPackList is False:
for p in self.packagelist:
if self.verbose:
print(" Package: ", p.name[0])
# prevent individual package checks from running after
# model-level package check above
# otherwise checks are run twice
# or the model level check procedure would have to be split up
# or each package would need a check argument,
# or default for package level check would have to be False
try:
p.write_file(check=False)
except TypeError:
p.write_file()
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(" Package: ", p.name[0])
try:
p.write_file(check=False)
except TypeError:
p.write_file()
break
if self.verbose:
print(" ")
# write name file
self.write_name_file()
# os.chdir(org_dir)
return
def write_name_file(self):
"""
        Every model needs its own write_name_file method.
        """
        raise Exception(
            "IMPLEMENTATION ERROR: write_name_file must be overloaded"
)
def set_model_units(self):
"""
Every model needs its own set_model_units method
"""
raise Exception(
"IMPLEMENTATION ERROR: set_model_units must be overloaded"
)
@property
def name(self):
"""
Get model name
Returns
-------
name : str
name of model
"""
return copy.deepcopy(self.__name)
def add_pop_key_list(self, key):
"""
        Add an external file unit number to a list that will be used to
        remove model output (typically binary) files from ext_unit_dict.
        Parameters
        ----------
        key : int
            file unit number
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
chk = utils.check(self, f=f, verbose=verbose, level=level)
# check for unit number conflicts
package_units = {}
duplicate_units = {}
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] != 0:
if p.unit_number[i] in package_units.values():
duplicate_units[p.name[i]] = p.unit_number[i]
otherpackage = [
k
for k, v in package_units.items()
if v == p.unit_number[i]
][0]
duplicate_units[otherpackage] = p.unit_number[i]
if len(duplicate_units) > 0:
for k, v in duplicate_units.items():
chk._add_to_summary(
"Error", package=k, value=v, desc="unit number conflict"
)
else:
chk.passed.append("Unit number conflicts")
return self._check(chk, level)
def plot(self, SelPackList=None, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
model input data
Parameters
----------
        SelPackList : None or list
            List of packages to plot. If SelPackList=None all packages
            are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
                MODFLOW zero-based layer number to return. If None,
                then all layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return.
(default is zero)
key : str
MfList dictionary key. (default is None)
        Returns
        -------
        axes : list
            Empty list is returned if filename_base is not None. Otherwise
            a list of matplotlib.pyplot.Axes is returned.
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_model_helper(
self, SelPackList=SelPackList, **kwargs
)
return axes
def to_shapefile(self, filename, package_names=None, **kwargs):
"""
Wrapper function for writing a shapefile for the model grid. If
package_names is not None, then search through the requested packages
looking for arrays that can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
        >>> m.to_shapefile('model.shp')
"""
        warnings.warn(
            "to_shapefile() is deprecated. use .export()",
            category=DeprecationWarning,
        )
self.export(filename, package_names=package_names)
return
def run_model(
exe_name,
namefile,
model_ws="./",
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
use_async=False,
cargs=None,
):
"""
This function will run the model using subprocess.Popen. It
communicates with the model's stdout asynchronously and reports
progress to the screen with timestamps
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
Namefile of model to run. The namefile must be the
filename of the namefile without the path. Namefile can be None
to allow programs that do not require a control file (name file)
to be passed as a command line argument.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
    silent : boolean
        Suppress echoing run information to the screen (default is
        False).
    pause : boolean, optional
        Pause upon completion (default is False).
    report : boolean, optional
        Save stdout lines to a list (buff) which is returned by the
        method. (default is False)
normal_msg : str or list
Normal termination message used to determine if the
run terminated normally. More than one message can be provided using
a list. (Default is 'normal termination')
    use_async : boolean
        Asynchronously read model stdout and report with timestamps;
        useful for long-running models, but not recommended for models
        that finish quickly.
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
success = False
buff = []
# convert normal_msg to a list of lower case str for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg]
for idx, s in enumerate(normal_msg):
normal_msg[idx] = s.lower()
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
import platform
if platform.system() in "Windows":
if not exe_name.lower().endswith(".exe"):
exe = which(exe_name + ".exe")
elif exe_name.lower().endswith(".exe"):
exe = which(exe_name[:-4])
if exe is None:
raise Exception(
f"The program {exe_name} does not exist or is not executable."
)
else:
if not silent:
print(
f"FloPy is using the following executable to run the model: {exe}"
)
if namefile is not None:
if not os.path.isfile(os.path.join(model_ws, namefile)):
raise Exception(
f"The namefile for this model does not exists: {namefile}"
)
# simple little function for the thread to target
def q_output(output, q):
for line in iter(output.readline, b""):
q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe_name]
if namefile is not None:
argv.append(namefile)
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
if not use_async:
while True:
line = proc.stdout.readline().decode("utf-8")
if line == "" and proc.poll() is not None:
break
if line:
for msg in normal_msg:
if msg in line.lower():
success = True
break
line = line.rstrip("\r\n")
if not silent:
print(line)
if report:
buff.append(line)
else:
break
return success, buff
# some tricks for the async stdout reading
q = Queue.Queue()
thread = threading.Thread(target=q_output, args=(proc.stdout, q))
thread.daemon = True
thread.start()
failed_words = ["fail", "error"]
last = datetime.now()
lastsec = 0.0
while True:
try:
line = q.get_nowait()
except Queue.Empty:
pass
else:
            if line == b"":
break
line = line.decode().lower().strip()
if line != "":
now = datetime.now()
dt = now - last
tsecs = dt.total_seconds() - lastsec
line = f"(elapsed:{tsecs})-->{line}"
lastsec = tsecs + lastsec
buff.append(line)
if not silent:
print(line)
for fword in failed_words:
if fword in line:
success = False
break
if proc.poll() is not None:
break
proc.wait()
thread.join(timeout=1)
buff.extend(proc.stdout.readlines())
proc.stdout.close()
for line in buff:
for msg in normal_msg:
if msg in line.lower():
print("success")
success = True
break
if pause:
input("Press Enter to continue...")
return success, buff
|
def add_existing_package(
self, filename, ptype=None, copy_to_model_ws=True
):
"""
Add an existing package to a model instance.
Parameters
----------
filename : str
the name of the file to add as a package
        ptype : str, optional
            the model package type (e.g. "lpf", "wel"). If None,
            then the file extension of the filename arg is used
        copy_to_model_ws : bool
            flag to copy the package file into the model_ws directory
            (default is True).
Returns
-------
None
"""
if ptype is None:
ptype = filename.split(".")[-1]
ptype = str(ptype).upper()
# for pak in self.packagelist:
# if ptype in pak.name:
# print("BaseModel.add_existing_package() warning: " +\
# "replacing existing package {0}".format(ptype))
class Obj:
pass
fake_package = Obj()
fake_package.write_file = lambda: None
fake_package.name = [ptype]
fake_package.extension = [filename.split(".")[-1]]
fake_package.unit_number = [self.next_ext_unit()]
if copy_to_model_ws:
base_filename = os.path.split(filename)[-1]
fake_package.file_name = [base_filename]
shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
else:
fake_package.file_name = [filename]
fake_package.allowDuplicates = True
self.add_package(fake_package)
| 1,048
| 1,093
|
"""
mbase module
This module contains the base model class from which
all other models inherit.
"""
import abc
import os
import shutil
import threading
import warnings
import queue as Queue
from datetime import datetime
from shutil import which
from subprocess import Popen, PIPE, STDOUT
import copy
import numpy as np
from flopy import utils, discretization
from .version import __version__
from .discretization.grid import Grid
## Global variables
# Multiplier for individual array elements in integer and real arrays read by
# MODFLOW's U2DREL, U1DREL and U2DINT.
iconst = 1
# Printout flag. If >= 0 then array values read are printed in listing file.
iprn = -1
# external exceptions for users
class PackageLoadException(Exception):
"""
FloPy package load exception.
"""
def __init__(self, error, location=""):
"""Initialize exception."""
self.message = error
super().__init__(f"{error} ({location})")
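# A hedged usage sketch (message and location are illustrative): the
# location is appended to the message by __init__ above.
# >>> raise PackageLoadException("could not parse LPF header", "model.lpf")
# PackageLoadException: could not parse LPF header (model.lpf)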
class FileDataEntry:
def __init__(self, fname, unit, binflag=False, output=False, package=None):
self.fname = fname
self.unit = unit
self.binflag = binflag
self.output = output
self.package = package
class FileData:
def __init__(self):
self.file_data = []
return
def add_file(self, fname, unit, binflag=False, output=False, package=None):
        ipop = []
        for idx, file_data in enumerate(self.file_data):
            if file_data.fname == fname or file_data.unit == unit:
                ipop.append(idx)
        # drop existing entries that share the same fname or unit so each
        # file and unit appears at most once
        for idx in reversed(ipop):
            self.file_data.pop(idx)
        self.file_data.append(
            FileDataEntry(
                fname, unit, binflag=binflag, output=output, package=package
            )
        )
        return
class ModelInterface:
def __init__(self):
self._mg_resync = True
self._modelgrid = None
def update_modelgrid(self):
if self._modelgrid is not None:
self._modelgrid = Grid(
proj4=self._modelgrid.proj4,
xoff=self._modelgrid.xoffset,
yoff=self._modelgrid.yoffset,
angrot=self._modelgrid.angrot,
)
self._mg_resync = True
@property
@abc.abstractmethod
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
@abc.abstractmethod
def packagelist(self):
raise NotImplementedError(
"must define packagelist in child class to use this base class"
)
@property
@abc.abstractmethod
def namefile(self):
raise NotImplementedError(
"must define namefile in child class to use this base class"
)
@property
@abc.abstractmethod
def model_ws(self):
raise NotImplementedError(
"must define model_ws in child class to use this base class"
)
@property
@abc.abstractmethod
def exename(self):
raise NotImplementedError(
"must define exename in child class to use this base class"
)
@property
@abc.abstractmethod
def version(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@property
@abc.abstractmethod
def solver_tols(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@abc.abstractmethod
def export(self, f, **kwargs):
raise NotImplementedError(
"must define export in child class to use this base class"
)
@property
@abc.abstractmethod
def laytyp(self):
raise NotImplementedError(
"must define laytyp in child class to use this base class"
)
@property
@abc.abstractmethod
def hdry(self):
raise NotImplementedError(
"must define hdry in child class to use this base class"
)
@property
@abc.abstractmethod
def hnoflo(self):
raise NotImplementedError(
"must define hnoflo in child class to use this base class"
)
@property
@abc.abstractmethod
def laycbd(self):
raise NotImplementedError(
"must define laycbd in child class to use this base class"
)
@property
@abc.abstractmethod
def verbose(self):
raise NotImplementedError(
"must define verbose in child class to use this base class"
)
@abc.abstractmethod
def check(self, f=None, verbose=True, level=1):
raise NotImplementedError(
"must define check in child class to use this base class"
)
def get_package_list(self, ftype=None):
"""
Get a list of all the package names.
Parameters
----------
ftype : str
Type of package, 'RIV', 'LPF', etc.
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
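        Examples
        --------
        Illustrative only; assumes a model ``m`` has already been loaded.
        >>> m.get_package_list()             # e.g. ['DIS', 'BAS6', 'LPF']
        >>> m.get_package_list(ftype='wel')  # restrict to one package type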
"""
val = []
for pp in self.packagelist:
if ftype is None:
val.append(pp.name[0].upper())
elif pp.package_type.lower() == ftype:
val.append(pp.name[0].upper())
return val
def _check(self, chk, level=1):
"""
Check model data for common errors.
Parameters
----------
        chk : check object
            the check instance to which model-level results are added
        level : int
            Check method analysis level. If level=0, summary checks are
            performed. If level=1, full checks are performed.
        Returns
        -------
        chk : check object
            the check instance, updated with model- and package-level results
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
results = {}
for p in self.packagelist:
if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
results[p.name[0]] = p.check(
f=None,
verbose=False,
level=level - 1,
checktype=chk.__class__,
)
# model level checks
# solver check
if self.version in chk.solver_packages.keys():
solvers = set(chk.solver_packages[self.version]).intersection(
set(self.get_package_list())
)
if not solvers:
chk._add_to_summary(
"Error", desc="\r No solver package", package="model"
)
elif len(list(solvers)) > 1:
for s in solvers:
chk._add_to_summary(
"Error",
desc="\r Multiple solver packages",
package=s,
)
else:
chk.passed.append("Compatible solver package")
# add package check results to model level check summary
for r in results.values():
if (
r is not None and r.summary_array is not None
): # currently SFR doesn't have one
chk.summary_array = np.append(
chk.summary_array, r.summary_array
).view(np.recarray)
chk.passed += [
f"{r.package.name[0]} package: {psd}" for psd in r.passed
]
chk.summarize()
return chk
class BaseModel(ModelInterface):
"""
MODFLOW-based models base class.
Parameters
----------
modelname : str, default "modflowtest"
Name of the model, which is also used for model file names.
namefile_ext : str, default "nam"
Name file extension, without "."
exe_name : str, default "mf2k.exe"
Name of the modflow executable.
model_ws : str, optional
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
structured : bool, default True
Specify if model grid is structured (default) or unstructured.
verbose : bool, default False
Print additional information to the screen.
**kwargs : dict, optional
Used to define: ``xll``/``yll`` for the x- and y-coordinates of
the lower-left corner of the grid, ``xul``/``yul`` for the
x- and y-coordinates of the upper-left corner of the grid
(deprecated), ``rotation`` for the grid rotation (default 0.0),
``proj4_str`` for a PROJ string, and ``start_datetime`` for
model start date (default "1-1-1970").
"""
def __init__(
self,
modelname="modflowtest",
namefile_ext="nam",
exe_name="mf2k.exe",
model_ws=None,
structured=True,
verbose=False,
**kwargs,
):
"""Initialize BaseModel."""
super().__init__()
self.__name = modelname
self.namefile_ext = namefile_ext or ""
self._namefile = self.__name + "." + self.namefile_ext
self._packagelist = []
self.heading = ""
self.exe_name = exe_name
self._verbose = verbose
self.external_path = None
self.external_extension = "ref"
if model_ws is None:
model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
            except OSError:
print(
f"\n{model_ws} not valid, "
f"workspace-folder was changed to {os.getcwd()}\n"
)
model_ws = os.getcwd()
self._model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ""
# check for reference info in kwargs
# we are just carrying these until a dis package is added
xll = kwargs.pop("xll", None)
yll = kwargs.pop("yll", None)
self._xul = kwargs.pop("xul", None)
self._yul = kwargs.pop("yul", None)
self._rotation = kwargs.pop("rotation", 0.0)
self._proj4_str = kwargs.pop("proj4_str", None)
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
# build model discretization objects
self._modelgrid = Grid(
proj4=self._proj4_str,
xoff=xll,
yoff=yll,
angrot=self._rotation,
)
self._modeltime = None
# Model file information
self.__onunit__ = 10
# external option stuff
self.array_free_format = True
self.free_format_input = True
self.parameter_load = False
self.array_format = None
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external_output = []
self.package_units = []
self._next_ext_unit = None
# output files
self.output_fnames = []
self.output_units = []
self.output_binflag = []
self.output_packages = []
return
@property
def modeltime(self):
raise NotImplementedError(
"must define modeltime in child class to use this base class"
)
@property
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
def packagelist(self):
return self._packagelist
@packagelist.setter
def packagelist(self, packagelist):
self._packagelist = packagelist
@property
def namefile(self):
return self._namefile
@namefile.setter
def namefile(self, namefile):
self._namefile = namefile
@property
def model_ws(self):
return self._model_ws
@model_ws.setter
def model_ws(self, model_ws):
self._model_ws = model_ws
@property
def exename(self):
return self._exename
@exename.setter
def exename(self, exename):
self._exename = exename
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, verbose):
self._verbose = verbose
@property
def laytyp(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").laytyp.array
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").laycon.array
if self.get_package("UPW") is not None:
return self.get_package("UPW").laytyp.array
return None
@property
def hdry(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").hdry
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").hdry
if self.get_package("UPW") is not None:
return self.get_package("UPW").hdry
return None
@property
def hnoflo(self):
try:
bas6 = self.get_package("BAS6")
return bas6.hnoflo
except AttributeError:
return None
@property
def laycbd(self):
try:
dis = self.get_package("DIS")
return dis.laycbd.array
except AttributeError:
return None
# we don't need these - no need for controlled access to array_free_format
# def set_free_format(self, value=True):
# """
# Set the free format flag for the model instance
#
# Parameters
# ----------
# value : bool
# Boolean value to set free format flag for model. (default is True)
#
# Returns
# -------
#
# """
# if not isinstance(value, bool):
# print('Error: set_free_format passed value must be a boolean')
# return False
# self.array_free_format = value
#
# def get_free_format(self):
# """
# Return the free format flag for the model
#
# Returns
# -------
# out : bool
# Free format flag for the model
#
# """
# return self.array_free_format
def next_unit(self, i=None):
if i is not None:
self.__onunit__ = i - 1
else:
self.__onunit__ += 1
return self.__onunit__
def next_ext_unit(self):
"""
Function to encapsulate next_ext_unit attribute
"""
next_unit = self._next_ext_unit + 1
self._next_ext_unit += 1
return next_unit
def export(self, f, **kwargs):
"""
Method to export a model to netcdf or shapefile based on the
extension of the file name (.shp for shapefile, .nc for netcdf)
Parameters
----------
f : str
filename
kwargs : keyword arguments
modelgrid : flopy.discretization.Grid instance
user supplied modelgrid which can be used for exporting
in lieu of the modelgrid associated with the model object
Returns
-------
None or Netcdf object
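        Examples
        --------
        A hedged sketch; the file names are hypothetical, and netcdf export
        typically requires the netCDF4 package.
        >>> m.export('model.shp')   # shapefile, inferred from .shp
        >>> m.export('model.nc')    # netcdf, inferred from .nc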
"""
from .export import utils
return utils.model_export(f, self, **kwargs)
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
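        Examples
        --------
        Packages normally register themselves with the model when they are
        constructed, so calling this directly is rarely needed; a sketch:
        >>> import flopy
        >>> wel = flopy.modflow.ModflowWel(m)   # added to ``m`` on creation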
"""
for idx, u in enumerate(p.unit_number):
if u != 0:
if u in self.package_units or u in self.external_units:
try:
pn = p.name[idx]
                    except (IndexError, TypeError):
pn = p.name
if self.verbose:
print(
f"\nWARNING:\n unit {u} of package {pn} already in use."
)
self.package_units.append(u)
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif isinstance(p, type(pp)):
if self.verbose:
print(
"\nWARNING:\n Two packages of the same type, "
f"Replacing existing '{p.name[0]}' package."
)
self.packagelist[i] = p
return
if self.verbose:
print("adding Package: ", p.name[0])
self.packagelist.append(p)
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
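        Examples
        --------
        Illustrative only; assumes a WEL package was previously added.
        >>> m.remove_package('WEL')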
"""
for i, pp in enumerate(self.packagelist):
if pname.upper() in pp.name:
if self.verbose:
print("removing Package: ", pp.name)
# Remove the package object from the model's packagelist
p = self.packagelist.pop(i)
# Remove the package unit number from the list of package
# units stored with the model
for iu in p.unit_number:
if iu in self.package_units:
self.package_units.remove(iu)
return
raise StopIteration(
"Package name " + pname + " not found in Package list"
)
def __getattr__(self, item):
"""
__getattr__ - syntactic sugar
Parameters
----------
        item : str
            package name (case insensitive), or one of "tr", "nper", or
            "start_datetime" to access the corresponding attribute of the
            ModflowDis object
        Returns
        -------
        pp : Package object
            Package object of type :class:`flopy.pakbase.Package`, or the
            value of the requested ModflowDis attribute
"""
if item == "output_packages" or not hasattr(self, "output_packages"):
raise AttributeError(item)
if item == "tr":
if self.dis is not None:
return self.dis.tr
else:
return None
if item == "nper":
if self.dis is not None:
return self.dis.nper
else:
return 0
if item == "start_datetime":
if self.dis is not None:
return self.dis.start_datetime
else:
return None
# return self.get_package(item)
# to avoid infinite recursion
if item == "_packagelist" or item == "packagelist":
raise AttributeError(item)
pckg = self.get_package(item)
if pckg is not None or item in self.mfnam_packages:
return pckg
if item == "modelgrid":
return
raise AttributeError(item)
def get_ext_dict_attr(
self, ext_unit_dict=None, unit=None, filetype=None, pop_key=True
):
iu = None
fname = None
if ext_unit_dict is not None:
for key, value in ext_unit_dict.items():
if key == unit:
iu = key
fname = os.path.basename(value.filename)
break
elif value.filetype == filetype:
iu = key
fname = os.path.basename(value.filename)
if pop_key:
self.add_pop_key_list(iu)
break
return iu, fname
def _output_msg(self, i, add=True):
if add:
txt1 = "Adding"
txt2 = "to"
else:
txt1 = "Removing"
txt2 = "from"
print(
f"{txt1} {self.output_fnames[i]} (unit={self.output_units[i]}) "
f"{txt2} the output list."
)
def add_output_file(
self, unit, fname=None, extension="cbc", binflag=True, package=None
):
"""
Add an ascii or binary output file for a package
Parameters
----------
unit : int
unit number of external array
fname : str
filename of external array. (default is None)
extension : str
extension to use for the cell-by-cell file. Only used if fname
is None. (default is cbc)
binflag : bool
boolean flag indicating if the output file is a binary file.
Default is True
package : str
string that defines the package the output file is attached to.
Default is None
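        Examples
        --------
        A hedged sketch; the unit number and package name are arbitrary.
        >>> m.add_output_file(53, extension='cbc', binflag=True, package='WEL')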
"""
add_cbc = False
if unit > 0:
add_cbc = True
# determine if the file is in external_units
if abs(unit) in self.external_units:
idx = self.external_units.index(abs(unit))
if fname is None:
fname = os.path.basename(self.external_fnames[idx])
binflag = self.external_binflag[idx]
self.remove_external(unit=abs(unit))
# determine if the unit exists in the output data
if abs(unit) in self.output_units:
add_cbc = False
idx = self.output_units.index(abs(unit))
# determine if binflag has changed
if binflag is not self.output_binflag[idx]:
add_cbc = True
if add_cbc:
self.remove_output(unit=abs(unit))
else:
if package is not None:
self.output_packages[idx].append(package)
if add_cbc:
if fname is None:
fname = f"{self.name}.{extension}"
# check if this file name exists for a different unit number
if fname in self.output_fnames:
idx = self.output_fnames.index(fname)
iut = self.output_units[idx]
if iut != unit:
# include unit number in fname if package has
# not been passed
if package is None:
fname = f"{self.name}.{unit}.{extension}"
# include package name in fname
else:
fname = f"{self.name}.{package}.{extension}"
else:
fname = os.path.basename(fname)
self.add_output(fname, unit, binflag=binflag, package=package)
return
def add_output(self, fname, unit, binflag=False, package=None):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
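        Examples
        --------
        Illustrative only; the file name and unit number are hypothetical.
        >>> m.add_output('modflowtest.cbc', 53, binflag=True)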
"""
if fname in self.output_fnames:
if self.verbose:
print(
"BaseModel.add_output() warning: "
f"replacing existing filename {fname}"
)
idx = self.output_fnames.index(fname)
if self.verbose:
self._output_msg(idx, add=False)
self.output_fnames.pop(idx)
self.output_units.pop(idx)
self.output_binflag.pop(idx)
self.output_packages.pop(idx)
self.output_fnames.append(fname)
self.output_units.append(unit)
self.output_binflag.append(binflag)
if package is not None:
self.output_packages.append([package])
else:
self.output_packages.append([])
if self.verbose:
self._output_msg(-1, add=True)
return
def remove_output(self, fname=None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
else:
msg = " either fname or unit must be passed to remove_output()"
raise Exception(msg)
return
def get_output(self, fname=None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
return self.output_units[i]
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
return self.output_fnames[i]
return None
else:
msg = " either fname or unit must be passed to get_output()"
raise Exception(msg)
return
def set_output_attribute(self, fname=None, unit=None, attr=None):
"""
Set a variable in an output file from the model by specifying either
the file name or the unit number and a dictionary with attributes
to change.
Parameters
----------
fname : str
filename of output array
unit : int
            unit number of output array
        attr : dict
            dictionary of attribute names ('binflag', 'fname', or 'unit')
            and the new values to assign
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
msg = (
" either fname or unit must be passed "
"to set_output_attribute()"
)
raise Exception(msg)
if attr is not None:
if idx is not None:
                for key, value in attr.items():
if key == "binflag":
self.output_binflag[idx] = value
elif key == "fname":
self.output_fnames[idx] = value
elif key == "unit":
self.output_units[idx] = value
return
def get_output_attribute(self, fname=None, unit=None, attr=None):
"""
        Get an attribute for an output file from the model by specifying either
the file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
            unit number of output array
        attr : str
            name of the attribute to return ('binflag', 'fname', or 'unit')
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
" either fname or unit must be passed "
"to set_output_attribute()"
)
v = None
if attr is not None:
if idx is not None:
if attr == "binflag":
v = self.output_binflag[idx]
elif attr == "fname":
v = self.output_fnames[idx]
elif attr == "unit":
v = self.output_units[idx]
return v
def add_external(self, fname, unit, binflag=False, output=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
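        Examples
        --------
        Illustrative only; the array file and unit number are hypothetical.
        >>> m.add_external('recharge.ref', 151)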
"""
if fname in self.external_fnames:
if self.verbose:
print(
"BaseModel.add_external() warning: "
f"replacing existing filename {fname}"
)
idx = self.external_fnames.index(fname)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
if unit in self.external_units:
if self.verbose:
msg = f"BaseModel.add_external() warning: replacing existing unit {unit}"
print(msg)
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
self.external_output.append(output)
return
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
"""
plist = []
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
plist.append(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
plist.append(i)
else:
msg = " either fname or unit must be passed to remove_external()"
raise Exception(msg)
# remove external file
j = 0
for i in plist:
ipos = i - j
self.external_fnames.pop(ipos)
self.external_units.pop(ipos)
self.external_binflag.pop(ipos)
self.external_output.pop(ipos)
j += 1
return
def add_existing_package(
self, filename, ptype=None, copy_to_model_ws=True
):
"""
Add an existing package to a model instance.
Parameters
----------
filename : str
the name of the file to add as a package
        ptype : str, optional
            the model package type (e.g. "lpf", "wel", etc.). If None,
            then the file extension of the filename arg is used
        copy_to_model_ws : bool
            flag to copy the package file into the model_ws directory
            (default is True).
Returns
-------
None
"""
if ptype is None:
ptype = filename.split(".")[-1]
ptype = str(ptype).upper()
# for pak in self.packagelist:
# if ptype in pak.name:
# print("BaseModel.add_existing_package() warning: " +\
# "replacing existing package {0}".format(ptype))
class Obj:
pass
fake_package = Obj()
fake_package.write_file = lambda: None
fake_package.name = [ptype]
fake_package.extension = [filename.split(".")[-1]]
fake_package.unit_number = [self.next_ext_unit()]
if copy_to_model_ws:
base_filename = os.path.split(filename)[-1]
fake_package.file_name = [base_filename]
shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
else:
fake_package.file_name = [filename]
fake_package.allowDuplicates = True
self.add_package(fake_package)
def get_name_file_entries(self):
"""
Get a string representation of the name file.
        Returns
        -------
        str
            string representation of the name file entries, one line per
            package file
"""
lines = []
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s = f"{p.name[i]:14s} {p.unit_number[i]:5d} {p.file_name[i]}"
lines.append(s)
return "\n".join(lines) + "\n"
def has_package(self, name):
"""
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
True if package name exists, otherwise False if not found.
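        Examples
        --------
        >>> m.has_package('DIS')   # True once a DIS package has been added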
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for p in self.packagelist:
for pn in p.name:
if pn.upper() == name:
return True
return False
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
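        Examples
        --------
        >>> lpf = m.get_package('LPF')   # None if no LPF package is present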
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for pp in self.packagelist:
if pp.name[0].upper() == name:
return pp
return None
def set_version(self, version):
self.version = version.lower()
# check that this is a valid model version
if self.version not in list(self.version_types.keys()):
err = (
f"Error: Unsupported model version ({self.version}). "
"Valid model versions are:"
)
for v in list(self.version_types.keys()):
err += f" {v}"
raise Exception(err)
# set namefile heading
self.heading = (
f"# Name file for {self.version_types[self.version]}, "
f"generated by Flopy version {__version__}."
)
# set heading for each package
for p in self.get_package_list():
pak = self.get_package(p)
if hasattr(pak, "heading"):
pak._generate_heading()
return None
def change_model_ws(self, new_pth=None, reset_external=False):
"""
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
the present working directory).
Returns
-------
        None
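        Examples
        --------
        Illustrative only; the workspace path is hypothetical.
        >>> m.change_model_ws('run01')   # created if it does not exist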
"""
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
print(f"\ncreating model workspace...\n {new_pth}")
os.makedirs(new_pth)
            except OSError:
raise OSError(f"{new_pth} not valid, workspace-folder")
# line = '\n{} not valid, workspace-folder '.format(new_pth) + \
# 'was changed to {}\n'.format(os.getcwd())
# print(line)
# new_pth = os.getcwd()
# --reset the model workspace
old_pth = self._model_ws
self._model_ws = new_pth
if self.verbose:
print(f"\nchanging model workspace...\n {new_pth}")
# reset the paths for each package
for pp in self.packagelist:
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
# create the external path (if needed)
if (
hasattr(self, "external_path")
and self.external_path is not None
and not os.path.exists(
os.path.join(self._model_ws, self.external_path)
)
):
pth = os.path.join(self._model_ws, self.external_path)
os.makedirs(pth)
if reset_external:
self._reset_external(pth, old_pth)
elif reset_external:
self._reset_external(self._model_ws, old_pth)
return None
def _reset_external(self, pth, old_pth):
new_ext_fnames = []
for ext_file, output in zip(
self.external_fnames, self.external_output
):
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
# this is a wicked mess
if output:
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
new_ext_file = ext_file
else:
# fpth = os.path.abspath(os.path.join(old_pth, ext_file))
# new_ext_file = os.path.relpath(fpth, os.path.abspath(pth))
fdir = os.path.dirname(ext_file)
if fdir == "":
fpth = os.path.abspath(os.path.join(old_pth, ext_file))
else:
fpth = ext_file
ao = os.path.abspath(os.path.dirname(fpth))
ep = os.path.abspath(pth)
relp = os.path.relpath(ao, ep)
new_ext_file = os.path.join(relp, os.path.basename(ext_file))
new_ext_fnames.append(new_ext_file)
self.external_fnames = new_ext_fnames
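    # NOTE: this property redefinition shadows the model_ws property and
    # setter declared earlier in the class, so assignments to ``model_ws``
    # are intercepted by __setattr__ and routed through change_model_ws().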
@property
def model_ws(self):
return copy.deepcopy(self._model_ws)
def _set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = str(value)
self.namefile = self.__name + "." + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + "." + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
def __setattr__(self, key, value):
if key == "free_format_input":
# if self.bas6 is not None:
# self.bas6.ifrefm = value
super().__setattr__(key, value)
elif key == "name":
self._set_name(value)
elif key == "model_ws":
self.change_model_ws(value)
elif key == "sr" and value.__class__.__name__ == "SpatialReference":
warnings.warn(
"SpatialReference has been deprecated.",
category=DeprecationWarning,
)
if self.dis is not None:
self.dis.sr = value
else:
raise Exception(
"cannot set SpatialReference - ModflowDis not found"
)
elif key == "tr":
assert isinstance(
value, discretization.reference.TemporalReference
)
if self.dis is not None:
self.dis.tr = value
else:
raise Exception(
"cannot set TemporalReference - ModflowDis not found"
)
elif key == "start_datetime":
if self.dis is not None:
self.dis.start_datetime = value
self.tr.start_datetime = value
else:
raise Exception(
"cannot set start_datetime - ModflowDis not found"
)
else:
super().__setattr__(key, value)
def run_model(
self,
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
):
"""
This method will run the model using subprocess.Popen.
Parameters
----------
silent : boolean
            If True, suppress echoing of run information to the screen
            (default is False).
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
            by the method. (default is False).
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
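        Examples
        --------
        A hedged sketch; requires the model executable to be on the
        system path.
        >>> success, buff = m.run_model(report=True)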
"""
return run_model(
self.exe_name,
self.namefile,
model_ws=self.model_ws,
silent=silent,
pause=pause,
report=report,
normal_msg=normal_msg,
)
def load_results(self):
print("load_results not implemented")
return None
def write_input(self, SelPackList=False, check=False):
"""
Write the input.
Parameters
----------
SelPackList : False or list of packages
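        Examples
        --------
        Illustrative only:
        >>> m.write_input()                     # write every package file
        >>> m.write_input(SelPackList=['WEL'])  # write only the WEL package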
"""
if check:
# run check prior to writing input
self.check(f=f"{self.name}.chk", verbose=self.verbose, level=1)
# reset the model to free_format if parameter substitution was
# performed on a model load
if self.parameter_load and not self.free_format_input:
if self.verbose:
print(
"\nResetting free_format_input to True to "
"preserve the precision of the parameter data."
)
self.free_format_input = True
if self.verbose:
print("\nWriting packages:")
        if SelPackList is False:
for p in self.packagelist:
if self.verbose:
print(" Package: ", p.name[0])
# prevent individual package checks from running after
# model-level package check above
# otherwise checks are run twice
# or the model level check procedure would have to be split up
# or each package would need a check argument,
# or default for package level check would have to be False
try:
p.write_file(check=False)
except TypeError:
p.write_file()
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(" Package: ", p.name[0])
try:
p.write_file(check=False)
except TypeError:
p.write_file()
break
if self.verbose:
print(" ")
# write name file
self.write_name_file()
# os.chdir(org_dir)
return
def write_name_file(self):
"""
        Every model needs its own write_name_file method
        """
        raise Exception(
            "IMPLEMENTATION ERROR: write_name_file must be overloaded"
)
def set_model_units(self):
"""
Every model needs its own set_model_units method
"""
raise Exception(
"IMPLEMENTATION ERROR: set_model_units must be overloaded"
)
@property
def name(self):
"""
Get model name
Returns
-------
name : str
name of model
"""
return copy.deepcopy(self.__name)
def add_pop_key_list(self, key):
"""
        Add an external file unit number to a list that will be used to remove
        model output (typically binary) files from ext_unit_dict.
        Parameters
        ----------
        key : int
            file unit number
        Returns
        -------
        None
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
chk = utils.check(self, f=f, verbose=verbose, level=level)
# check for unit number conflicts
package_units = {}
duplicate_units = {}
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] != 0:
if p.unit_number[i] in package_units.values():
duplicate_units[p.name[i]] = p.unit_number[i]
otherpackage = [
k
for k, v in package_units.items()
if v == p.unit_number[i]
][0]
duplicate_units[otherpackage] = p.unit_number[i]
if len(duplicate_units) > 0:
for k, v in duplicate_units.items():
chk._add_to_summary(
"Error", package=k, value=v, desc="unit number conflict"
)
else:
chk.passed.append("Unit number conflicts")
return self._check(chk, level)
def plot(self, SelPackList=None, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
model input data
Parameters
----------
SelPackList : bool or list
            List of packages to plot. If SelPackList=None all packages
are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
            MODFLOW zero-based layer number to return. If None, then all
            layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return.
(default is zero)
key : str
MfList dictionary key. (default is None)
Returns
        -------
axes : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis are returned.
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_model_helper(
self, SelPackList=SelPackList, **kwargs
)
return axes
def to_shapefile(self, filename, package_names=None, **kwargs):
"""
Wrapper function for writing a shapefile for the model grid. If
package_names is not None, then search through the requested packages
looking for arrays that can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
        >>> m.to_shapefile('model.shp', package_names=['dis', 'lpf'])
"""
warnings.warn("to_shapefile() is deprecated. use .export()")
self.export(filename, package_names=package_names)
return
def run_model(
exe_name,
namefile,
model_ws="./",
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
use_async=False,
cargs=None,
):
"""
This function will run the model using subprocess.Popen. It
communicates with the model's stdout asynchronously and reports
progress to the screen with timestamps
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
Namefile of model to run. The namefile must be the
filename of the namefile without the path. Namefile can be None
to allow programs that do not require a control file (name file)
to be passed as a command line argument.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
silent : boolean
        If True, suppress echoing of run information to the screen
        (default is False).
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
        by the method. (default is False).
normal_msg : str or list
Normal termination message used to determine if the
run terminated normally. More than one message can be provided using
a list. (Default is 'normal termination')
use_async : boolean
        If True, asynchronously read model stdout and report with
        timestamps; useful for models that take a long time to run, but
        not for models that finish quickly.
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
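    Examples
    --------
    A hedged sketch; the executable and name file names are hypothetical.
    >>> success, buff = run_model('mf2005', 'model.nam', model_ws='run01')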
"""
success = False
buff = []
# convert normal_msg to a list of lower case str for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg]
for idx, s in enumerate(normal_msg):
normal_msg[idx] = s.lower()
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
import platform
        if platform.system() == "Windows":
if not exe_name.lower().endswith(".exe"):
exe = which(exe_name + ".exe")
elif exe_name.lower().endswith(".exe"):
exe = which(exe_name[:-4])
if exe is None:
raise Exception(
f"The program {exe_name} does not exist or is not executable."
)
else:
if not silent:
print(
f"FloPy is using the following executable to run the model: {exe}"
)
if namefile is not None:
if not os.path.isfile(os.path.join(model_ws, namefile)):
raise Exception(
f"The namefile for this model does not exists: {namefile}"
)
# simple little function for the thread to target
def q_output(output, q):
for line in iter(output.readline, b""):
q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe_name]
if namefile is not None:
argv.append(namefile)
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
if not use_async:
while True:
line = proc.stdout.readline().decode("utf-8")
if line == "" and proc.poll() is not None:
break
if line:
for msg in normal_msg:
if msg in line.lower():
success = True
break
line = line.rstrip("\r\n")
if not silent:
print(line)
if report:
buff.append(line)
else:
break
return success, buff
# some tricks for the async stdout reading
q = Queue.Queue()
thread = threading.Thread(target=q_output, args=(proc.stdout, q))
thread.daemon = True
thread.start()
failed_words = ["fail", "error"]
last = datetime.now()
lastsec = 0.0
while True:
try:
line = q.get_nowait()
except Queue.Empty:
pass
else:
if line == "":
break
line = line.decode().lower().strip()
if line != "":
now = datetime.now()
dt = now - last
tsecs = dt.total_seconds() - lastsec
line = f"(elapsed:{tsecs})-->{line}"
lastsec = tsecs + lastsec
buff.append(line)
if not silent:
print(line)
for fword in failed_words:
if fword in line:
success = False
break
if proc.poll() is not None:
break
proc.wait()
thread.join(timeout=1)
    # drain any remaining stdout, decoding bytes so the normal_msg
    # comparison below operates on strings
    buff.extend([b.decode("utf-8") for b in proc.stdout.readlines()])
proc.stdout.close()
for line in buff:
for msg in normal_msg:
if msg in line.lower():
print("success")
success = True
break
if pause:
input("Press Enter to continue...")
return success, buff
|
has_package
|
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
True if package name exists, otherwise False if not found.
|
"""
mbase module
This module contains the base model class from which
all of the other models inherit from.
"""
import abc
import os
import shutil
import threading
import warnings
import queue as Queue
from datetime import datetime
from shutil import which
from subprocess import Popen, PIPE, STDOUT
import copy
import numpy as np
from flopy import utils, discretization
from .version import __version__
from .discretization.grid import Grid
## Global variables
# Multiplier for individual array elements in integer and real arrays read by
# MODFLOW's U2DREL, U1DREL and U2DINT.
iconst = 1
# Printout flag. If >= 0 then array values read are printed in listing file.
iprn = -1
# external exceptions for users
class PackageLoadException(Exception):
"""
FloPy package load exception.
"""
def __init__(self, error, location=""):
"""Initialize exception."""
self.message = error
super().__init__(f"{error} ({location})")
class FileDataEntry:
def __init__(self, fname, unit, binflag=False, output=False, package=None):
self.fname = fname
self.unit = unit
self.binflag = binflag
self.output = output
self.package = package
class FileData:
def __init__(self):
self.file_data = []
return
def add_file(self, fname, unit, binflag=False, output=False, package=None):
ipop = []
for idx, file_data in enumerate(self.file_data):
if file_data.fname == fname or file_data.unit == unit:
ipop.append(idx)
self.file_data.append(
FileDataEntry(
fname, unit, binflag=binflag, output=output, package=package
)
)
return
class ModelInterface:
def __init__(self):
self._mg_resync = True
self._modelgrid = None
def update_modelgrid(self):
if self._modelgrid is not None:
self._modelgrid = Grid(
proj4=self._modelgrid.proj4,
xoff=self._modelgrid.xoffset,
yoff=self._modelgrid.yoffset,
angrot=self._modelgrid.angrot,
)
self._mg_resync = True
@property
@abc.abstractmethod
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
@abc.abstractmethod
def packagelist(self):
raise NotImplementedError(
"must define packagelist in child class to use this base class"
)
@property
@abc.abstractmethod
def namefile(self):
raise NotImplementedError(
"must define namefile in child class to use this base class"
)
@property
@abc.abstractmethod
def model_ws(self):
raise NotImplementedError(
"must define model_ws in child class to use this base class"
)
@property
@abc.abstractmethod
def exename(self):
raise NotImplementedError(
"must define exename in child class to use this base class"
)
@property
@abc.abstractmethod
def version(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@property
@abc.abstractmethod
def solver_tols(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@abc.abstractmethod
def export(self, f, **kwargs):
raise NotImplementedError(
"must define export in child class to use this base class"
)
@property
@abc.abstractmethod
def laytyp(self):
raise NotImplementedError(
"must define laytyp in child class to use this base class"
)
@property
@abc.abstractmethod
def hdry(self):
raise NotImplementedError(
"must define hdry in child class to use this base class"
)
@property
@abc.abstractmethod
def hnoflo(self):
raise NotImplementedError(
"must define hnoflo in child class to use this base class"
)
@property
@abc.abstractmethod
def laycbd(self):
raise NotImplementedError(
"must define laycbd in child class to use this base class"
)
@property
@abc.abstractmethod
def verbose(self):
raise NotImplementedError(
"must define verbose in child class to use this base class"
)
@abc.abstractmethod
def check(self, f=None, verbose=True, level=1):
raise NotImplementedError(
"must define check in child class to use this base class"
)
def get_package_list(self, ftype=None):
"""
Get a list of all the package names.
Parameters
----------
ftype : str
Type of package, 'RIV', 'LPF', etc.
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
"""
val = []
for pp in self.packagelist:
if ftype is None:
val.append(pp.name[0].upper())
elif pp.package_type.lower() == ftype:
val.append(pp.name[0].upper())
return val
def _check(self, chk, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
summarize : bool
Boolean flag used to determine if summary of results is written
to the screen
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
results = {}
for p in self.packagelist:
if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
results[p.name[0]] = p.check(
f=None,
verbose=False,
level=level - 1,
checktype=chk.__class__,
)
# model level checks
# solver check
if self.version in chk.solver_packages.keys():
solvers = set(chk.solver_packages[self.version]).intersection(
set(self.get_package_list())
)
if not solvers:
chk._add_to_summary(
"Error", desc="\r No solver package", package="model"
)
elif len(list(solvers)) > 1:
for s in solvers:
chk._add_to_summary(
"Error",
desc="\r Multiple solver packages",
package=s,
)
else:
chk.passed.append("Compatible solver package")
# add package check results to model level check summary
for r in results.values():
if (
r is not None and r.summary_array is not None
): # currently SFR doesn't have one
chk.summary_array = np.append(
chk.summary_array, r.summary_array
).view(np.recarray)
chk.passed += [
f"{r.package.name[0]} package: {psd}" for psd in r.passed
]
chk.summarize()
return chk
class BaseModel(ModelInterface):
"""
MODFLOW-based models base class.
Parameters
----------
modelname : str, default "modflowtest"
Name of the model, which is also used for model file names.
namefile_ext : str, default "nam"
Name file extension, without "."
exe_name : str, default "mf2k.exe"
Name of the modflow executable.
model_ws : str, optional
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
structured : bool, default True
Specify if model grid is structured (default) or unstructured.
verbose : bool, default False
Print additional information to the screen.
**kwargs : dict, optional
Used to define: ``xll``/``yll`` for the x- and y-coordinates of
the lower-left corner of the grid, ``xul``/``yul`` for the
x- and y-coordinates of the upper-left corner of the grid
(deprecated), ``rotation`` for the grid rotation (default 0.0),
``proj4_str`` for a PROJ string, and ``start_datetime`` for
model start date (default "1-1-1970").
"""
def __init__(
self,
modelname="modflowtest",
namefile_ext="nam",
exe_name="mf2k.exe",
model_ws=None,
structured=True,
verbose=False,
**kwargs,
):
"""Initialize BaseModel."""
super().__init__()
self.__name = modelname
self.namefile_ext = namefile_ext or ""
self._namefile = self.__name + "." + self.namefile_ext
self._packagelist = []
self.heading = ""
self.exe_name = exe_name
self._verbose = verbose
self.external_path = None
self.external_extension = "ref"
if model_ws is None:
model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
except:
print(
f"\n{model_ws} not valid, "
f"workspace-folder was changed to {os.getcwd()}\n"
)
model_ws = os.getcwd()
self._model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ""
# check for reference info in kwargs
# we are just carrying these until a dis package is added
xll = kwargs.pop("xll", None)
yll = kwargs.pop("yll", None)
self._xul = kwargs.pop("xul", None)
self._yul = kwargs.pop("yul", None)
self._rotation = kwargs.pop("rotation", 0.0)
self._proj4_str = kwargs.pop("proj4_str", None)
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
# build model discretization objects
self._modelgrid = Grid(
proj4=self._proj4_str,
xoff=xll,
yoff=yll,
angrot=self._rotation,
)
self._modeltime = None
# Model file information
self.__onunit__ = 10
# external option stuff
self.array_free_format = True
self.free_format_input = True
self.parameter_load = False
self.array_format = None
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external_output = []
self.package_units = []
self._next_ext_unit = None
# output files
self.output_fnames = []
self.output_units = []
self.output_binflag = []
self.output_packages = []
return
@property
def modeltime(self):
raise NotImplementedError(
"must define modeltime in child class to use this base class"
)
@property
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
def packagelist(self):
return self._packagelist
@packagelist.setter
def packagelist(self, packagelist):
self._packagelist = packagelist
@property
def namefile(self):
return self._namefile
@namefile.setter
def namefile(self, namefile):
self._namefile = namefile
@property
def model_ws(self):
return self._model_ws
@model_ws.setter
def model_ws(self, model_ws):
self._model_ws = model_ws
@property
def exename(self):
return self._exename
@exename.setter
def exename(self, exename):
self._exename = exename
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, verbose):
self._verbose = verbose
@property
def laytyp(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").laytyp.array
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").laycon.array
if self.get_package("UPW") is not None:
return self.get_package("UPW").laytyp.array
return None
@property
def hdry(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").hdry
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").hdry
if self.get_package("UPW") is not None:
return self.get_package("UPW").hdry
return None
@property
def hnoflo(self):
try:
bas6 = self.get_package("BAS6")
return bas6.hnoflo
except AttributeError:
return None
@property
def laycbd(self):
try:
dis = self.get_package("DIS")
return dis.laycbd.array
except AttributeError:
return None
# we don't need these - no need for controlled access to array_free_format
# def set_free_format(self, value=True):
# """
# Set the free format flag for the model instance
#
# Parameters
# ----------
# value : bool
# Boolean value to set free format flag for model. (default is True)
#
# Returns
# -------
#
# """
# if not isinstance(value, bool):
# print('Error: set_free_format passed value must be a boolean')
# return False
# self.array_free_format = value
#
# def get_free_format(self):
# """
# Return the free format flag for the model
#
# Returns
# -------
# out : bool
# Free format flag for the model
#
# """
# return self.array_free_format
def next_unit(self, i=None):
if i is not None:
self.__onunit__ = i - 1
else:
self.__onunit__ += 1
return self.__onunit__
def next_ext_unit(self):
"""
Function to encapsulate next_ext_unit attribute
"""
next_unit = self._next_ext_unit + 1
self._next_ext_unit += 1
return next_unit
def export(self, f, **kwargs):
"""
Method to export a model to netcdf or shapefile based on the
extension of the file name (.shp for shapefile, .nc for netcdf)
Parameters
----------
f : str
filename
kwargs : keyword arguments
modelgrid : flopy.discretization.Grid instance
user supplied modelgrid which can be used for exporting
in lieu of the modelgrid associated with the model object
Returns
-------
None or Netcdf object
"""
from .export import utils
return utils.model_export(f, self, **kwargs)
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
"""
for idx, u in enumerate(p.unit_number):
if u != 0:
if u in self.package_units or u in self.external_units:
try:
pn = p.name[idx]
except:
pn = p.name
if self.verbose:
print(
f"\nWARNING:\n unit {u} of package {pn} already in use."
)
self.package_units.append(u)
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif isinstance(p, type(pp)):
if self.verbose:
print(
"\nWARNING:\n Two packages of the same type, "
f"Replacing existing '{p.name[0]}' package."
)
self.packagelist[i] = p
return
if self.verbose:
print("adding Package: ", p.name[0])
self.packagelist.append(p)
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
"""
for i, pp in enumerate(self.packagelist):
if pname.upper() in pp.name:
if self.verbose:
print("removing Package: ", pp.name)
# Remove the package object from the model's packagelist
p = self.packagelist.pop(i)
# Remove the package unit number from the list of package
# units stored with the model
for iu in p.unit_number:
if iu in self.package_units:
self.package_units.remove(iu)
return
raise StopIteration(
"Package name " + pname + " not found in Package list"
)
def __getattr__(self, item):
"""
__getattr__ - syntactic sugar
Parameters
----------
item : str
3 character package name (case insensitive) or "sr" to access
the SpatialReference instance of the ModflowDis object
Returns
-------
sr : SpatialReference instance
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
Note
----
if self.dis is not None, then the spatial reference instance is updated
using self.dis.delr, self.dis.delc, and self.dis.lenuni before being
returned
"""
if item == "output_packages" or not hasattr(self, "output_packages"):
raise AttributeError(item)
if item == "tr":
if self.dis is not None:
return self.dis.tr
else:
return None
if item == "nper":
if self.dis is not None:
return self.dis.nper
else:
return 0
if item == "start_datetime":
if self.dis is not None:
return self.dis.start_datetime
else:
return None
# return self.get_package(item)
# to avoid infinite recursion
if item == "_packagelist" or item == "packagelist":
raise AttributeError(item)
pckg = self.get_package(item)
if pckg is not None or item in self.mfnam_packages:
return pckg
if item == "modelgrid":
return
raise AttributeError(item)
def get_ext_dict_attr(
self, ext_unit_dict=None, unit=None, filetype=None, pop_key=True
):
iu = None
fname = None
if ext_unit_dict is not None:
for key, value in ext_unit_dict.items():
if key == unit:
iu = key
fname = os.path.basename(value.filename)
break
elif value.filetype == filetype:
iu = key
fname = os.path.basename(value.filename)
if pop_key:
self.add_pop_key_list(iu)
break
return iu, fname
def _output_msg(self, i, add=True):
if add:
txt1 = "Adding"
txt2 = "to"
else:
txt1 = "Removing"
txt2 = "from"
print(
f"{txt1} {self.output_fnames[i]} (unit={self.output_units[i]}) "
f"{txt2} the output list."
)
def add_output_file(
self, unit, fname=None, extension="cbc", binflag=True, package=None
):
"""
Add an ascii or binary output file for a package
Parameters
----------
unit : int
unit number of external array
fname : str
filename of external array. (default is None)
extension : str
extension to use for the cell-by-cell file. Only used if fname
is None. (default is cbc)
binflag : bool
boolean flag indicating if the output file is a binary file.
Default is True
package : str
string that defines the package the output file is attached to.
Default is None
"""
add_cbc = False
if unit > 0:
add_cbc = True
# determine if the file is in external_units
if abs(unit) in self.external_units:
idx = self.external_units.index(abs(unit))
if fname is None:
fname = os.path.basename(self.external_fnames[idx])
binflag = self.external_binflag[idx]
self.remove_external(unit=abs(unit))
# determine if the unit exists in the output data
if abs(unit) in self.output_units:
add_cbc = False
idx = self.output_units.index(abs(unit))
# determine if binflag has changed
if binflag is not self.output_binflag[idx]:
add_cbc = True
if add_cbc:
self.remove_output(unit=abs(unit))
else:
if package is not None:
self.output_packages[idx].append(package)
if add_cbc:
if fname is None:
fname = f"{self.name}.{extension}"
# check if this file name exists for a different unit number
if fname in self.output_fnames:
idx = self.output_fnames.index(fname)
iut = self.output_units[idx]
if iut != unit:
# include unit number in fname if package has
# not been passed
if package is None:
fname = f"{self.name}.{unit}.{extension}"
# include package name in fname
else:
fname = f"{self.name}.{package}.{extension}"
else:
fname = os.path.basename(fname)
self.add_output(fname, unit, binflag=binflag, package=package)
return
def add_output(self, fname, unit, binflag=False, package=None):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.output_fnames:
if self.verbose:
print(
"BaseModel.add_output() warning: "
f"replacing existing filename {fname}"
)
idx = self.output_fnames.index(fname)
if self.verbose:
self._output_msg(idx, add=False)
self.output_fnames.pop(idx)
self.output_units.pop(idx)
self.output_binflag.pop(idx)
self.output_packages.pop(idx)
self.output_fnames.append(fname)
self.output_units.append(unit)
self.output_binflag.append(binflag)
if package is not None:
self.output_packages.append([package])
else:
self.output_packages.append([])
if self.verbose:
self._output_msg(-1, add=True)
return
def remove_output(self, fname=None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
else:
msg = " either fname or unit must be passed to remove_output()"
raise Exception(msg)
return
def get_output(self, fname=None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
return self.output_units[i]
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
return self.output_fnames[i]
return None
else:
msg = " either fname or unit must be passed to get_output()"
raise Exception(msg)
return
def set_output_attribute(self, fname=None, unit=None, attr=None):
"""
Set a variable in an output file from the model by specifying either
the file name or the unit number and a dictionary with attributes
to change.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
attr : dict
dictionary of attribute names ("binflag", "fname", or "unit")
mapped to the new values to assign
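Examples
--------
A minimal sketch (m is an assumed model instance; unit 53 is
illustrative):
>>> m.set_output_attribute(unit=53, attr={'binflag': False})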
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
msg = (
" either fname or unit must be passed "
"to set_output_attribute()"
)
raise Exception(msg)
if attr is not None:
if idx is not None:
for key, value in attr.items():
if key == "binflag":
self.output_binflag[idx] = value
elif key == "fname":
self.output_fnames[idx] = value
elif key == "unit":
self.output_units[idx] = value
return
def get_output_attribute(self, fname=None, unit=None, attr=None):
"""
Get an attribute for an output file from the model by specifying either
the file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
attr : str
name of the attribute to return ("binflag", "fname", or "unit")
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
" either fname or unit must be passed "
"to set_output_attribute()"
)
v = None
if attr is not None:
if idx is not None:
if attr == "binflag":
v = self.output_binflag[idx]
elif attr == "fname":
v = self.output_fnames[idx]
elif attr == "unit":
v = self.output_units[idx]
return v
def add_external(self, fname, unit, binflag=False, output=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
output : boolean
flag indicating whether the file is a model output file.
(default is False)
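Examples
--------
A minimal sketch (m is an assumed model instance; the file name and
unit number are illustrative):
>>> m.add_external('recharge.ref', 151)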
"""
if fname in self.external_fnames:
if self.verbose:
print(
"BaseModel.add_external() warning: "
f"replacing existing filename {fname}"
)
idx = self.external_fnames.index(fname)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
if unit in self.external_units:
if self.verbose:
msg = f"BaseModel.add_external() warning: replacing existing unit {unit}"
print(msg)
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
self.external_output.append(output)
return
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
"""
plist = []
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
plist.append(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
plist.append(i)
else:
msg = " either fname or unit must be passed to remove_external()"
raise Exception(msg)
# remove external file
j = 0
for i in plist:
ipos = i - j
self.external_fnames.pop(ipos)
self.external_units.pop(ipos)
self.external_binflag.pop(ipos)
self.external_output.pop(ipos)
j += 1
return
def add_existing_package(
self, filename, ptype=None, copy_to_model_ws=True
):
"""
Add an existing package to a model instance.
Parameters
----------
filename : str
the name of the file to add as a package
ptype : optional
the model package type (e.g. "lpf", "wel", etc). If None,
then the file extension of the filename arg is used
copy_to_model_ws : bool
flag to copy the package file into the model_ws directory.
Returns
-------
None
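Examples
--------
A minimal sketch (m is an assumed model instance and 'mymodel.lpf'
an existing package file):
>>> m.add_existing_package('mymodel.lpf', ptype='lpf')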
"""
if ptype is None:
ptype = filename.split(".")[-1]
ptype = str(ptype).upper()
# for pak in self.packagelist:
# if ptype in pak.name:
# print("BaseModel.add_existing_package() warning: " +\
# "replacing existing package {0}".format(ptype))
class Obj:
pass
fake_package = Obj()
fake_package.write_file = lambda: None
fake_package.name = [ptype]
fake_package.extension = [filename.split(".")[-1]]
fake_package.unit_number = [self.next_ext_unit()]
if copy_to_model_ws:
base_filename = os.path.split(filename)[-1]
fake_package.file_name = [base_filename]
shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
else:
fake_package.file_name = [filename]
fake_package.allowDuplicates = True
self.add_package(fake_package)
def get_name_file_entries(self):
"""
Get a string representation of the name file.
Returns
-------
str
string representation of the name file entries, one line per
package file
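Examples
--------
A minimal sketch (m is an assumed model instance):
>>> print(m.get_name_file_entries())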
"""
lines = []
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s = f"{p.name[i]:14s} {p.unit_number[i]:5d} {p.file_name[i]}"
lines.append(s)
return "\n".join(lines) + "\n"
# MASKED: has_package function (lines 1112-1134)
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for pp in self.packagelist:
if pp.name[0].upper() == name:
return pp
return None
def set_version(self, version):
self.version = version.lower()
# check that this is a valid model version
if self.version not in list(self.version_types.keys()):
err = (
f"Error: Unsupported model version ({self.version}). "
"Valid model versions are:"
)
for v in list(self.version_types.keys()):
err += f" {v}"
raise Exception(err)
# set namefile heading
self.heading = (
f"# Name file for {self.version_types[self.version]}, "
f"generated by Flopy version {__version__}."
)
# set heading for each package
for p in self.get_package_list():
pak = self.get_package(p)
if hasattr(pak, "heading"):
pak._generate_heading()
return None
def change_model_ws(self, new_pth=None, reset_external=False):
"""
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
the present working directory).
reset_external : bool
if True, reset the paths of external files relative to the new
model workspace. (default is False)
Returns
-------
None
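Examples
--------
A minimal sketch (m is an assumed model instance):
>>> m.change_model_ws('new_workspace')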
"""
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
print(f"\ncreating model workspace...\n {new_pth}")
os.makedirs(new_pth)
except:
raise OSError(f"{new_pth} is not a valid workspace folder")
# line = '\n{} not valid, workspace-folder '.format(new_pth) + \
# 'was changed to {}\n'.format(os.getcwd())
# print(line)
# new_pth = os.getcwd()
# --reset the model workspace
old_pth = self._model_ws
self._model_ws = new_pth
if self.verbose:
print(f"\nchanging model workspace...\n {new_pth}")
# reset the paths for each package
for pp in self.packagelist:
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
# create the external path (if needed)
if (
hasattr(self, "external_path")
and self.external_path is not None
and not os.path.exists(
os.path.join(self._model_ws, self.external_path)
)
):
pth = os.path.join(self._model_ws, self.external_path)
os.makedirs(pth)
if reset_external:
self._reset_external(pth, old_pth)
elif reset_external:
self._reset_external(self._model_ws, old_pth)
return None
def _reset_external(self, pth, old_pth):
new_ext_fnames = []
for ext_file, output in zip(
self.external_fnames, self.external_output
):
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
# this is a wicked mess
if output:
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
new_ext_file = ext_file
else:
# fpth = os.path.abspath(os.path.join(old_pth, ext_file))
# new_ext_file = os.path.relpath(fpth, os.path.abspath(pth))
fdir = os.path.dirname(ext_file)
if fdir == "":
fpth = os.path.abspath(os.path.join(old_pth, ext_file))
else:
fpth = ext_file
ao = os.path.abspath(os.path.dirname(fpth))
ep = os.path.abspath(pth)
relp = os.path.relpath(ao, ep)
new_ext_file = os.path.join(relp, os.path.basename(ext_file))
new_ext_fnames.append(new_ext_file)
self.external_fnames = new_ext_fnames
@property
def model_ws(self):
return copy.deepcopy(self._model_ws)
def _set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = str(value)
self.namefile = self.__name + "." + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + "." + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
def __setattr__(self, key, value):
if key == "free_format_input":
# if self.bas6 is not None:
# self.bas6.ifrefm = value
super().__setattr__(key, value)
elif key == "name":
self._set_name(value)
elif key == "model_ws":
self.change_model_ws(value)
elif key == "sr" and value.__class__.__name__ == "SpatialReference":
warnings.warn(
"SpatialReference has been deprecated.",
category=DeprecationWarning,
)
if self.dis is not None:
self.dis.sr = value
else:
raise Exception(
"cannot set SpatialReference - ModflowDis not found"
)
elif key == "tr":
assert isinstance(
value, discretization.reference.TemporalReference
)
if self.dis is not None:
self.dis.tr = value
else:
raise Exception(
"cannot set TemporalReference - ModflowDis not found"
)
elif key == "start_datetime":
if self.dis is not None:
self.dis.start_datetime = value
self.tr.start_datetime = value
else:
raise Exception(
"cannot set start_datetime - ModflowDis not found"
)
else:
super().__setattr__(key, value)
def run_model(
self,
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
):
"""
This method will run the model using subprocess.Popen.
Parameters
----------
silent : boolean
If True, suppress model output to the screen. (default is False).
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
by the method. (default is False).
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
return run_model(
self.exe_name,
self.namefile,
model_ws=self.model_ws,
silent=silent,
pause=pause,
report=report,
normal_msg=normal_msg,
)
def load_results(self):
print("load_results not implemented")
return None
def write_input(self, SelPackList=False, check=False):
"""
Write the input.
Parameters
----------
SelPackList : False or list of str
if False (the default), write input for every package; otherwise
write input only for the named packages.
"""
if check:
# run check prior to writing input
self.check(f=f"{self.name}.chk", verbose=self.verbose, level=1)
# reset the model to free_format if parameter substitution was
# performed on a model load
if self.parameter_load and not self.free_format_input:
if self.verbose:
print(
"\nResetting free_format_input to True to "
"preserve the precision of the parameter data."
)
self.free_format_input = True
if self.verbose:
print("\nWriting packages:")
if SelPackList is False:
for p in self.packagelist:
if self.verbose:
print(" Package: ", p.name[0])
# prevent individual package checks from running after
# model-level package check above
# otherwise checks are run twice
# or the model level check procedure would have to be split up
# or each package would need a check argument,
# or default for package level check would have to be False
try:
p.write_file(check=False)
except TypeError:
p.write_file()
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(" Package: ", p.name[0])
try:
p.write_file(check=False)
except TypeError:
p.write_file()
break
if self.verbose:
print(" ")
# write name file
self.write_name_file()
# os.chdir(org_dir)
return
def write_name_file(self):
"""
Every Package needs its own writenamefile function
"""
raise Exception(
"IMPLEMENTATION ERROR: writenamefile must be overloaded"
)
def set_model_units(self):
"""
Every model needs its own set_model_units method
"""
raise Exception(
"IMPLEMENTATION ERROR: set_model_units must be overloaded"
)
@property
def name(self):
"""
Get model name
Returns
-------
name : str
name of model
"""
return copy.deepcopy(self.__name)
def add_pop_key_list(self, key):
"""
Add an external file unit number to a list that will be used to remove
model output (typically binary) files from ext_unit_dict.
Parameters
----------
key : int
file unit number
Returns
-------
Examples
--------
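A minimal sketch (m is an assumed model instance; 53 is an
illustrative unit number):
>>> m.add_pop_key_list(53)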
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
chk = utils.check(self, f=f, verbose=verbose, level=level)
# check for unit number conflicts
package_units = {}
duplicate_units = {}
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] != 0:
if p.unit_number[i] in package_units.values():
duplicate_units[p.name[i]] = p.unit_number[i]
otherpackage = [
k
for k, v in package_units.items()
if v == p.unit_number[i]
][0]
duplicate_units[otherpackage] = p.unit_number[i]
if len(duplicate_units) > 0:
for k, v in duplicate_units.items():
chk._add_to_summary(
"Error", package=k, value=v, desc="unit number conflict"
)
else:
chk.passed.append("Unit number conflicts")
return self._check(chk, level)
def plot(self, SelPackList=None, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
model input data
Parameters
----------
SelPackList : list of str or None
List of packages to plot. If SelPackList=None all packages
are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
MODFLOW zero-based layer number to return. If None, then all
layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return.
(default is zero)
key : str
MfList dictionary key. (default is None)
Returns
----------
axes : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis are returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_model_helper(
self, SelPackList=SelPackList, **kwargs
)
return axes
def to_shapefile(self, filename, package_names=None, **kwargs):
"""
Wrapper function for writing a shapefile for the model grid. If
package_names is not None, then search through the requested packages
looking for arrays that can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> m.to_shapefile('model.shp')
"""
warnings.warn("to_shapefile() is deprecated. use .export()")
self.export(filename, package_names=package_names)
return
def run_model(
exe_name,
namefile,
model_ws="./",
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
use_async=False,
cargs=None,
):
"""
This function will run the model using subprocess.Popen. It
communicates with the model's stdout asynchronously and reports
progress to the screen with timestamps
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
Namefile of model to run. The namefile must be the
filename of the namefile without the path. Namefile can be None
to allow programs that do not require a control file (name file)
to be passed as a command line argument.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
silent : boolean
If True, suppress model output to the screen. (default is False).
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
by the method. (default is False).
normal_msg : str or list
Normal termination message used to determine if the
run terminated normally. More than one message can be provided using
a list. (Default is 'normal termination')
use_async : boolean
asynchronously read model stdout and report with timestamps; useful
for long-running models, but adds overhead for models that finish
quickly. (default is False)
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
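Examples
--------
A minimal sketch (the executable name and name file are illustrative
assumptions; run_model is the module-level function defined here):
>>> success, buff = run_model('mf2005', 'example.nam', model_ws='.', report=True)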
"""
success = False
buff = []
# convert normal_msg to a list of lower case str for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg]
for idx, s in enumerate(normal_msg):
normal_msg[idx] = s.lower()
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
import platform
if platform.system() == "Windows":
if not exe_name.lower().endswith(".exe"):
exe = which(exe_name + ".exe")
elif exe_name.lower().endswith(".exe"):
exe = which(exe_name[:-4])
if exe is None:
raise Exception(
f"The program {exe_name} does not exist or is not executable."
)
else:
if not silent:
print(
f"FloPy is using the following executable to run the model: {exe}"
)
if namefile is not None:
if not os.path.isfile(os.path.join(model_ws, namefile)):
raise Exception(
f"The namefile for this model does not exists: {namefile}"
)
# simple little function for the thread to target
def q_output(output, q):
for line in iter(output.readline, b""):
q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe_name]
if namefile is not None:
argv.append(namefile)
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
if not use_async:
while True:
line = proc.stdout.readline().decode("utf-8")
if line == "" and proc.poll() is not None:
break
if line:
for msg in normal_msg:
if msg in line.lower():
success = True
break
line = line.rstrip("\r\n")
if not silent:
print(line)
if report:
buff.append(line)
else:
break
return success, buff
# some tricks for the async stdout reading
q = Queue.Queue()
thread = threading.Thread(target=q_output, args=(proc.stdout, q))
thread.daemon = True
thread.start()
failed_words = ["fail", "error"]
last = datetime.now()
lastsec = 0.0
while True:
try:
line = q.get_nowait()
except Queue.Empty:
pass
else:
if line == "":
break
line = line.decode().lower().strip()
if line != "":
now = datetime.now()
dt = now - last
tsecs = dt.total_seconds() - lastsec
line = f"(elapsed:{tsecs})-->{line}"
lastsec = tsecs + lastsec
buff.append(line)
if not silent:
print(line)
for fword in failed_words:
if fword in line:
success = False
break
if proc.poll() is not None:
break
proc.wait()
thread.join(timeout=1)
buff.extend(proc.stdout.readlines())
proc.stdout.close()
for line in buff:
for msg in normal_msg:
if msg in line.lower():
print("success")
success = True
break
if pause:
input("Press Enter to continue...")
return success, buff
|
def has_package(self, name):
"""
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
True if the package is in the package list, otherwise False.
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for p in self.packagelist:
for pn in p.name:
if pn.upper() == name:
return True
return False
| 1112
| 1134
|
"""
mbase module
This module contains the base model class from which
all of the other models inherit.
"""
import abc
import os
import shutil
import threading
import warnings
import queue as Queue
from datetime import datetime
from shutil import which
from subprocess import Popen, PIPE, STDOUT
import copy
import numpy as np
from flopy import utils, discretization
from .version import __version__
from .discretization.grid import Grid
## Global variables
# Multiplier for individual array elements in integer and real arrays read by
# MODFLOW's U2DREL, U1DREL and U2DINT.
iconst = 1
# Printout flag. If >= 0 then array values read are printed in listing file.
iprn = -1
# external exceptions for users
class PackageLoadException(Exception):
"""
FloPy package load exception.
"""
def __init__(self, error, location=""):
"""Initialize exception."""
self.message = error
super().__init__(f"{error} ({location})")
class FileDataEntry:
def __init__(self, fname, unit, binflag=False, output=False, package=None):
self.fname = fname
self.unit = unit
self.binflag = binflag
self.output = output
self.package = package
class FileData:
def __init__(self):
self.file_data = []
return
def add_file(self, fname, unit, binflag=False, output=False, package=None):
# drop existing entries that share the same fname or unit so the
# new entry replaces them instead of duplicating them
ipop = []
for idx, file_data in enumerate(self.file_data):
if file_data.fname == fname or file_data.unit == unit:
ipop.append(idx)
for idx in reversed(ipop):
self.file_data.pop(idx)
self.file_data.append(
FileDataEntry(
fname, unit, binflag=binflag, output=output, package=package
)
)
return
class ModelInterface:
def __init__(self):
self._mg_resync = True
self._modelgrid = None
def update_modelgrid(self):
if self._modelgrid is not None:
self._modelgrid = Grid(
proj4=self._modelgrid.proj4,
xoff=self._modelgrid.xoffset,
yoff=self._modelgrid.yoffset,
angrot=self._modelgrid.angrot,
)
self._mg_resync = True
@property
@abc.abstractmethod
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
@abc.abstractmethod
def packagelist(self):
raise NotImplementedError(
"must define packagelist in child class to use this base class"
)
@property
@abc.abstractmethod
def namefile(self):
raise NotImplementedError(
"must define namefile in child class to use this base class"
)
@property
@abc.abstractmethod
def model_ws(self):
raise NotImplementedError(
"must define model_ws in child class to use this base class"
)
@property
@abc.abstractmethod
def exename(self):
raise NotImplementedError(
"must define exename in child class to use this base class"
)
@property
@abc.abstractmethod
def version(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@property
@abc.abstractmethod
def solver_tols(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@abc.abstractmethod
def export(self, f, **kwargs):
raise NotImplementedError(
"must define export in child class to use this base class"
)
@property
@abc.abstractmethod
def laytyp(self):
raise NotImplementedError(
"must define laytyp in child class to use this base class"
)
@property
@abc.abstractmethod
def hdry(self):
raise NotImplementedError(
"must define hdry in child class to use this base class"
)
@property
@abc.abstractmethod
def hnoflo(self):
raise NotImplementedError(
"must define hnoflo in child class to use this base class"
)
@property
@abc.abstractmethod
def laycbd(self):
raise NotImplementedError(
"must define laycbd in child class to use this base class"
)
@property
@abc.abstractmethod
def verbose(self):
raise NotImplementedError(
"must define verbose in child class to use this base class"
)
@abc.abstractmethod
def check(self, f=None, verbose=True, level=1):
raise NotImplementedError(
"must define check in child class to use this base class"
)
def get_package_list(self, ftype=None):
"""
Get a list of all the package names.
Parameters
----------
ftype : str
Type of package, 'RIV', 'LPF', etc.
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
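Examples
--------
A minimal sketch (m is an assumed model instance):
>>> pkgs = m.get_package_list()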
"""
val = []
for pp in self.packagelist:
if ftype is None:
val.append(pp.name[0].upper())
elif pp.package_type.lower() == ftype:
val.append(pp.name[0].upper())
return val
def _check(self, chk, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
summarize : bool
Boolean flag used to determine if summary of results is written
to the screen
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
results = {}
for p in self.packagelist:
if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
results[p.name[0]] = p.check(
f=None,
verbose=False,
level=level - 1,
checktype=chk.__class__,
)
# model level checks
# solver check
if self.version in chk.solver_packages.keys():
solvers = set(chk.solver_packages[self.version]).intersection(
set(self.get_package_list())
)
if not solvers:
chk._add_to_summary(
"Error", desc="\r No solver package", package="model"
)
elif len(list(solvers)) > 1:
for s in solvers:
chk._add_to_summary(
"Error",
desc="\r Multiple solver packages",
package=s,
)
else:
chk.passed.append("Compatible solver package")
# add package check results to model level check summary
for r in results.values():
if (
r is not None and r.summary_array is not None
): # currently SFR doesn't have one
chk.summary_array = np.append(
chk.summary_array, r.summary_array
).view(np.recarray)
chk.passed += [
f"{r.package.name[0]} package: {psd}" for psd in r.passed
]
chk.summarize()
return chk
class BaseModel(ModelInterface):
"""
MODFLOW-based models base class.
Parameters
----------
modelname : str, default "modflowtest"
Name of the model, which is also used for model file names.
namefile_ext : str, default "nam"
Name file extension, without "."
exe_name : str, default "mf2k.exe"
Name of the modflow executable.
model_ws : str, optional
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
structured : bool, default True
Specify if model grid is structured (default) or unstructured.
verbose : bool, default False
Print additional information to the screen.
**kwargs : dict, optional
Used to define: ``xll``/``yll`` for the x- and y-coordinates of
the lower-left corner of the grid, ``xul``/``yul`` for the
x- and y-coordinates of the upper-left corner of the grid
(deprecated), ``rotation`` for the grid rotation (default 0.0),
``proj4_str`` for a PROJ string, and ``start_datetime`` for
model start date (default "1-1-1970").
"""
def __init__(
self,
modelname="modflowtest",
namefile_ext="nam",
exe_name="mf2k.exe",
model_ws=None,
structured=True,
verbose=False,
**kwargs,
):
"""Initialize BaseModel."""
super().__init__()
self.__name = modelname
self.namefile_ext = namefile_ext or ""
self._namefile = self.__name + "." + self.namefile_ext
self._packagelist = []
self.heading = ""
self.exe_name = exe_name
self._verbose = verbose
self.external_path = None
self.external_extension = "ref"
if model_ws is None:
model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
except:
print(
f"\n{model_ws} not valid, "
f"workspace-folder was changed to {os.getcwd()}\n"
)
model_ws = os.getcwd()
self._model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ""
# check for reference info in kwargs
# we are just carrying these until a dis package is added
xll = kwargs.pop("xll", None)
yll = kwargs.pop("yll", None)
self._xul = kwargs.pop("xul", None)
self._yul = kwargs.pop("yul", None)
self._rotation = kwargs.pop("rotation", 0.0)
self._proj4_str = kwargs.pop("proj4_str", None)
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
# build model discretization objects
self._modelgrid = Grid(
proj4=self._proj4_str,
xoff=xll,
yoff=yll,
angrot=self._rotation,
)
self._modeltime = None
# Model file information
self.__onunit__ = 10
# external option stuff
self.array_free_format = True
self.free_format_input = True
self.parameter_load = False
self.array_format = None
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external_output = []
self.package_units = []
self._next_ext_unit = None
# output files
self.output_fnames = []
self.output_units = []
self.output_binflag = []
self.output_packages = []
return
@property
def modeltime(self):
raise NotImplementedError(
"must define modeltime in child class to use this base class"
)
@property
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
def packagelist(self):
return self._packagelist
@packagelist.setter
def packagelist(self, packagelist):
self._packagelist = packagelist
@property
def namefile(self):
return self._namefile
@namefile.setter
def namefile(self, namefile):
self._namefile = namefile
@property
def model_ws(self):
return self._model_ws
@model_ws.setter
def model_ws(self, model_ws):
self._model_ws = model_ws
@property
def exename(self):
return self._exename
@exename.setter
def exename(self, exename):
self._exename = exename
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, verbose):
self._verbose = verbose
@property
def laytyp(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").laytyp.array
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").laycon.array
if self.get_package("UPW") is not None:
return self.get_package("UPW").laytyp.array
return None
@property
def hdry(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").hdry
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").hdry
if self.get_package("UPW") is not None:
return self.get_package("UPW").hdry
return None
@property
def hnoflo(self):
try:
bas6 = self.get_package("BAS6")
return bas6.hnoflo
except AttributeError:
return None
@property
def laycbd(self):
try:
dis = self.get_package("DIS")
return dis.laycbd.array
except AttributeError:
return None
# we don't need these - no need for controlled access to array_free_format
# def set_free_format(self, value=True):
# """
# Set the free format flag for the model instance
#
# Parameters
# ----------
# value : bool
# Boolean value to set free format flag for model. (default is True)
#
# Returns
# -------
#
# """
# if not isinstance(value, bool):
# print('Error: set_free_format passed value must be a boolean')
# return False
# self.array_free_format = value
#
# def get_free_format(self):
# """
# Return the free format flag for the model
#
# Returns
# -------
# out : bool
# Free format flag for the model
#
# """
# return self.array_free_format
def next_unit(self, i=None):
if i is not None:
self.__onunit__ = i - 1
else:
self.__onunit__ += 1
return self.__onunit__
def next_ext_unit(self):
"""
Return the next available external unit number and advance the counter.
"""
next_unit = self._next_ext_unit + 1
self._next_ext_unit += 1
return next_unit
def export(self, f, **kwargs):
"""
Method to export a model to netcdf or shapefile based on the
extension of the file name (.shp for shapefile, .nc for netcdf)
Parameters
----------
f : str
filename
kwargs : keyword arguments
modelgrid : flopy.discretization.Grid instance
user supplied modelgrid which can be used for exporting
in lieu of the modelgrid associated with the model object
Returns
-------
None or Netcdf object
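Examples
--------
A minimal sketch (m is an assumed model instance; the export format
is selected by the file extension):
>>> m.export('model.shp')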
"""
from .export import utils
return utils.model_export(f, self, **kwargs)
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
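Examples
--------
A minimal sketch (m is an assumed flopy.modflow.Modflow instance;
classic flopy package constructors call add_package themselves):
>>> wel = flopy.modflow.ModflowWel(m)  # registers itself on m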
"""
for idx, u in enumerate(p.unit_number):
if u != 0:
if u in self.package_units or u in self.external_units:
try:
pn = p.name[idx]
except:
pn = p.name
if self.verbose:
print(
f"\nWARNING:\n unit {u} of package {pn} already in use."
)
self.package_units.append(u)
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif isinstance(p, type(pp)):
if self.verbose:
print(
"\nWARNING:\n Two packages of the same type, "
f"Replacing existing '{p.name[0]}' package."
)
self.packagelist[i] = p
return
if self.verbose:
print("adding Package: ", p.name[0])
self.packagelist.append(p)
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
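Examples
--------
A minimal sketch (m is an assumed model instance with a WEL package):
>>> m.remove_package('WEL')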
"""
for i, pp in enumerate(self.packagelist):
if pname.upper() in pp.name:
if self.verbose:
print("removing Package: ", pp.name)
# Remove the package object from the model's packagelist
p = self.packagelist.pop(i)
# Remove the package unit number from the list of package
# units stored with the model
for iu in p.unit_number:
if iu in self.package_units:
self.package_units.remove(iu)
return
raise StopIteration(
"Package name " + pname + " not found in Package list"
)
def __getattr__(self, item):
"""
__getattr__ - syntactic sugar
Parameters
----------
item : str
3 character package name (case insensitive) or "sr" to access
the SpatialReference instance of the ModflowDis object
Returns
-------
sr : SpatialReference instance
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
Note
----
if self.dis is not None, then the spatial reference instance is updated
using self.dis.delr, self.dis.delc, and self.dis.lenuni before being
returned
"""
if item == "output_packages" or not hasattr(self, "output_packages"):
raise AttributeError(item)
if item == "tr":
if self.dis is not None:
return self.dis.tr
else:
return None
if item == "nper":
if self.dis is not None:
return self.dis.nper
else:
return 0
if item == "start_datetime":
if self.dis is not None:
return self.dis.start_datetime
else:
return None
# return self.get_package(item)
# to avoid infinite recursion
if item == "_packagelist" or item == "packagelist":
raise AttributeError(item)
pckg = self.get_package(item)
if pckg is not None or item in self.mfnam_packages:
return pckg
if item == "modelgrid":
return
raise AttributeError(item)
def get_ext_dict_attr(
self, ext_unit_dict=None, unit=None, filetype=None, pop_key=True
):
iu = None
fname = None
if ext_unit_dict is not None:
for key, value in ext_unit_dict.items():
if key == unit:
iu = key
fname = os.path.basename(value.filename)
break
elif value.filetype == filetype:
iu = key
fname = os.path.basename(value.filename)
if pop_key:
self.add_pop_key_list(iu)
break
return iu, fname
def _output_msg(self, i, add=True):
if add:
txt1 = "Adding"
txt2 = "to"
else:
txt1 = "Removing"
txt2 = "from"
print(
f"{txt1} {self.output_fnames[i]} (unit={self.output_units[i]}) "
f"{txt2} the output list."
)
def add_output_file(
self, unit, fname=None, extension="cbc", binflag=True, package=None
):
"""
Add an ascii or binary output file for a package
Parameters
----------
unit : int
unit number of external array
fname : str
filename of external array. (default is None)
extension : str
extension to use for the cell-by-cell file. Only used if fname
is None. (default is cbc)
binflag : bool
boolean flag indicating if the output file is a binary file.
Default is True
package : str
string that defines the package the output file is attached to.
Default is None
"""
add_cbc = False
if unit > 0:
add_cbc = True
# determine if the file is in external_units
if abs(unit) in self.external_units:
idx = self.external_units.index(abs(unit))
if fname is None:
fname = os.path.basename(self.external_fnames[idx])
binflag = self.external_binflag[idx]
self.remove_external(unit=abs(unit))
# determine if the unit exists in the output data
if abs(unit) in self.output_units:
add_cbc = False
idx = self.output_units.index(abs(unit))
# determine if binflag has changed
if binflag is not self.output_binflag[idx]:
add_cbc = True
if add_cbc:
self.remove_output(unit=abs(unit))
else:
if package is not None:
self.output_packages[idx].append(package)
if add_cbc:
if fname is None:
fname = f"{self.name}.{extension}"
# check if this file name exists for a different unit number
if fname in self.output_fnames:
idx = self.output_fnames.index(fname)
iut = self.output_units[idx]
if iut != unit:
# include unit number in fname if package has
# not been passed
if package is None:
fname = f"{self.name}.{unit}.{extension}"
# include package name in fname
else:
fname = f"{self.name}.{package}.{extension}"
else:
fname = os.path.basename(fname)
self.add_output(fname, unit, binflag=binflag, package=package)
return
def add_output(self, fname, unit, binflag=False, package=None):
"""
Add an output file so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This allows an outside
file package to refer to it.
Parameters
----------
fname : str
filename of the output file
unit : int
unit number of the output file
binflag : boolean
binary or not. (default is False)
package : str, optional
name of the package to associate with the output file.
(default is None)
"""
if fname in self.output_fnames:
if self.verbose:
print(
"BaseModel.add_output() warning: "
f"replacing existing filename {fname}"
)
idx = self.output_fnames.index(fname)
if self.verbose:
self._output_msg(idx, add=False)
self.output_fnames.pop(idx)
self.output_units.pop(idx)
self.output_binflag.pop(idx)
self.output_packages.pop(idx)
self.output_fnames.append(fname)
self.output_units.append(unit)
self.output_binflag.append(binflag)
if package is not None:
self.output_packages.append([package])
else:
self.output_packages.append([])
if self.verbose:
self._output_msg(-1, add=True)
return
def remove_output(self, fname=None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
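Examples
--------
A minimal sketch (m is an assumed model instance; unit 53 is
illustrative):
>>> m.remove_output(unit=53)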
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
else:
msg = " either fname or unit must be passed to remove_output()"
raise Exception(msg)
return
def get_output(self, fname=None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
return self.output_units[i]
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
return self.output_fnames[i]
return None
else:
msg = " either fname or unit must be passed to get_output()"
raise Exception(msg)
return
def set_output_attribute(self, fname=None, unit=None, attr=None):
"""
Set a variable in an output file from the model by specifying either
the file name or the unit number and a dictionary with attributes
to change.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
attr : dict
dictionary of attribute names ("binflag", "fname", or "unit")
mapped to the new values to assign
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
msg = (
" either fname or unit must be passed "
"to set_output_attribute()"
)
raise Exception(msg)
if attr is not None:
if idx is not None:
for key, value in attr.items():
if key == "binflag":
self.output_binflag[idx] = value
elif key == "fname":
self.output_fnames[idx] = value
elif key == "unit":
self.output_units[idx] = value
return
def get_output_attribute(self, fname=None, unit=None, attr=None):
"""
Get an attribute for an output file from the model by specifying either
the file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
attr : str
name of the attribute to return ("binflag", "fname", or "unit")
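Examples
--------
A minimal sketch (m is an assumed model instance; unit 53 is
illustrative):
>>> m.get_output_attribute(unit=53, attr='fname')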
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
" either fname or unit must be passed "
"to set_output_attribute()"
)
v = None
if attr is not None:
if idx is not None:
if attr == "binflag":
v = self.output_binflag[idx]
elif attr == "fname":
v = self.output_fnames[idx]
elif attr == "unit":
v = self.output_units[idx]
return v
def add_external(self, fname, unit, binflag=False, output=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
output : boolean
flag indicating whether the file is a model output file.
(default is False)
"""
if fname in self.external_fnames:
if self.verbose:
print(
"BaseModel.add_external() warning: "
f"replacing existing filename {fname}"
)
idx = self.external_fnames.index(fname)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
if unit in self.external_units:
if self.verbose:
msg = f"BaseModel.add_external() warning: replacing existing unit {unit}"
print(msg)
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
self.external_output.append(output)
return
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
"""
plist = []
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
plist.append(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
plist.append(i)
else:
msg = " either fname or unit must be passed to remove_external()"
raise Exception(msg)
# remove external file
j = 0
for i in plist:
ipos = i - j
self.external_fnames.pop(ipos)
self.external_units.pop(ipos)
self.external_binflag.pop(ipos)
self.external_output.pop(ipos)
j += 1
return
def add_existing_package(
self, filename, ptype=None, copy_to_model_ws=True
):
"""
Add an existing package to a model instance.
Parameters
----------
filename : str
the name of the file to add as a package
ptype : optional
the model package type (e.g. "lpf", "wel", etc). If None,
then the file extension of the filename arg is used
copy_to_model_ws : bool
flag to copy the package file into the model_ws directory.
Returns
-------
None
"""
if ptype is None:
ptype = filename.split(".")[-1]
ptype = str(ptype).upper()
# for pak in self.packagelist:
# if ptype in pak.name:
# print("BaseModel.add_existing_package() warning: " +\
# "replacing existing package {0}".format(ptype))
class Obj:
pass
fake_package = Obj()
fake_package.write_file = lambda: None
fake_package.name = [ptype]
fake_package.extension = [filename.split(".")[-1]]
fake_package.unit_number = [self.next_ext_unit()]
if copy_to_model_ws:
base_filename = os.path.split(filename)[-1]
fake_package.file_name = [base_filename]
shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
else:
fake_package.file_name = [filename]
fake_package.allowDuplicates = True
self.add_package(fake_package)
def get_name_file_entries(self):
"""
Get a string representation of the name file.
Returns
-------
str
string representation of the name file entries, one line per
package file
"""
lines = []
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s = f"{p.name[i]:14s} {p.unit_number[i]:5d} {p.file_name[i]}"
lines.append(s)
return "\n".join(lines) + "\n"
def has_package(self, name):
"""
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
True if the package is in the package list, otherwise False.
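Examples
--------
A minimal sketch (m is an assumed model instance):
>>> m.has_package('DIS')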
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for p in self.packagelist:
for pn in p.name:
if pn.upper() == name:
return True
return False
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for pp in self.packagelist:
if pp.name[0].upper() == name:
return pp
return None
def set_version(self, version):
self.version = version.lower()
# check that this is a valid model version
if self.version not in list(self.version_types.keys()):
err = (
f"Error: Unsupported model version ({self.version}). "
"Valid model versions are:"
)
for v in list(self.version_types.keys()):
err += f" {v}"
raise Exception(err)
# set namefile heading
self.heading = (
f"# Name file for {self.version_types[self.version]}, "
f"generated by Flopy version {__version__}."
)
# set heading for each package
for p in self.get_package_list():
pak = self.get_package(p)
if hasattr(pak, "heading"):
pak._generate_heading()
return None
def change_model_ws(self, new_pth=None, reset_external=False):
"""
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
the present working directory).
reset_external : bool
if True, reset the paths of external files relative to the new
model workspace. (default is False)
Returns
-------
None
"""
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
print(f"\ncreating model workspace...\n {new_pth}")
os.makedirs(new_pth)
except:
raise OSError(f"{new_pth} is not a valid workspace folder")
# line = '\n{} not valid, workspace-folder '.format(new_pth) + \
# 'was changed to {}\n'.format(os.getcwd())
# print(line)
# new_pth = os.getcwd()
# --reset the model workspace
old_pth = self._model_ws
self._model_ws = new_pth
if self.verbose:
print(f"\nchanging model workspace...\n {new_pth}")
# reset the paths for each package
for pp in self.packagelist:
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
# create the external path (if needed)
if (
hasattr(self, "external_path")
and self.external_path is not None
and not os.path.exists(
os.path.join(self._model_ws, self.external_path)
)
):
pth = os.path.join(self._model_ws, self.external_path)
os.makedirs(pth)
if reset_external:
self._reset_external(pth, old_pth)
elif reset_external:
self._reset_external(self._model_ws, old_pth)
return None
def _reset_external(self, pth, old_pth):
new_ext_fnames = []
for ext_file, output in zip(
self.external_fnames, self.external_output
):
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
# this is a wicked mess
if output:
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
new_ext_file = ext_file
else:
# fpth = os.path.abspath(os.path.join(old_pth, ext_file))
# new_ext_file = os.path.relpath(fpth, os.path.abspath(pth))
fdir = os.path.dirname(ext_file)
if fdir == "":
fpth = os.path.abspath(os.path.join(old_pth, ext_file))
else:
fpth = ext_file
ao = os.path.abspath(os.path.dirname(fpth))
ep = os.path.abspath(pth)
relp = os.path.relpath(ao, ep)
new_ext_file = os.path.join(relp, os.path.basename(ext_file))
new_ext_fnames.append(new_ext_file)
self.external_fnames = new_ext_fnames
@property
def model_ws(self):
return copy.deepcopy(self._model_ws)
def _set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = str(value)
self.namefile = self.__name + "." + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + "." + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
def __setattr__(self, key, value):
if key == "free_format_input":
# if self.bas6 is not None:
# self.bas6.ifrefm = value
super().__setattr__(key, value)
elif key == "name":
self._set_name(value)
elif key == "model_ws":
self.change_model_ws(value)
elif key == "sr" and value.__class__.__name__ == "SpatialReference":
warnings.warn(
"SpatialReference has been deprecated.",
category=DeprecationWarning,
)
if self.dis is not None:
self.dis.sr = value
else:
raise Exception(
"cannot set SpatialReference - ModflowDis not found"
)
elif key == "tr":
assert isinstance(
value, discretization.reference.TemporalReference
)
if self.dis is not None:
self.dis.tr = value
else:
raise Exception(
"cannot set TemporalReference - ModflowDis not found"
)
elif key == "start_datetime":
if self.dis is not None:
self.dis.start_datetime = value
self.tr.start_datetime = value
else:
raise Exception(
"cannot set start_datetime - ModflowDis not found"
)
else:
super().__setattr__(key, value)
def run_model(
self,
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
):
"""
This method will run the model using subprocess.Popen.
Parameters
----------
silent : boolean
If True, suppress model output to the screen. (default is False).
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
by the method. (default is False).
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
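Examples
--------
A minimal sketch (m is an assumed model instance whose exe_name
points to a working executable):
>>> success, buff = m.run_model(silent=True, report=True)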
"""
return run_model(
self.exe_name,
self.namefile,
model_ws=self.model_ws,
silent=silent,
pause=pause,
report=report,
normal_msg=normal_msg,
)
def load_results(self):
print("load_results not implemented")
return None
def write_input(self, SelPackList=False, check=False):
"""
Write the input.
Parameters
----------
SelPackList : False or list of str
if False (the default), write input for every package; otherwise
write input only for the named packages.
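Examples
--------
A minimal sketch (m is an assumed model instance):
>>> m.write_input()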
"""
if check:
# run check prior to writing input
self.check(f=f"{self.name}.chk", verbose=self.verbose, level=1)
# reset the model to free_format if parameter substitution was
# performed on a model load
if self.parameter_load and not self.free_format_input:
if self.verbose:
print(
"\nResetting free_format_input to True to "
"preserve the precision of the parameter data."
)
self.free_format_input = True
if self.verbose:
print("\nWriting packages:")
if SelPackList is False:
for p in self.packagelist:
if self.verbose:
print(" Package: ", p.name[0])
# prevent individual package checks from running after
# model-level package check above
# otherwise checks are run twice
# or the model level check procedure would have to be split up
# or each package would need a check argument,
# or default for package level check would have to be False
try:
p.write_file(check=False)
except TypeError:
p.write_file()
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(" Package: ", p.name[0])
try:
p.write_file(check=False)
except TypeError:
p.write_file()
break
if self.verbose:
print(" ")
# write name file
self.write_name_file()
# os.chdir(org_dir)
return
def write_name_file(self):
"""
Every Package needs its own writenamefile function
"""
raise Exception(
"IMPLEMENTATION ERROR: writenamefile must be overloaded"
)
def set_model_units(self):
"""
Every model needs its own set_model_units method
"""
raise Exception(
"IMPLEMENTATION ERROR: set_model_units must be overloaded"
)
@property
def name(self):
"""
Get model name
Returns
-------
name : str
name of model
"""
return copy.deepcopy(self.__name)
def add_pop_key_list(self, key):
"""
Add an external file unit number to a list that will be used to remove
model output (typically binary) files from ext_unit_dict.
Parameters
----------
key : int
file unit number
Returns
-------
Examples
--------
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
chk = utils.check(self, f=f, verbose=verbose, level=level)
# check for unit number conflicts
package_units = {}
duplicate_units = {}
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] != 0:
if p.unit_number[i] in package_units.values():
duplicate_units[p.name[i]] = p.unit_number[i]
otherpackage = [
k
for k, v in package_units.items()
if v == p.unit_number[i]
][0]
duplicate_units[otherpackage] = p.unit_number[i]
if len(duplicate_units) > 0:
for k, v in duplicate_units.items():
chk._add_to_summary(
"Error", package=k, value=v, desc="unit number conflict"
)
else:
chk.passed.append("Unit number conflicts")
return self._check(chk, level)
def plot(self, SelPackList=None, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
model input data
Parameters
----------
SelPackList : list of str or None
List of packages to plot. If SelPackList=None all packages
are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
MODFLOW zero-based layer number to return. If None, then all
layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return.
(default is zero)
key : str
MfList dictionary key. (default is None)
Returns
----------
axes : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis are returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_model_helper(
self, SelPackList=SelPackList, **kwargs
)
return axes
def to_shapefile(self, filename, package_names=None, **kwargs):
"""
Wrapper function for writing a shapefile for the model grid. If
package_names is not None, then search through the requested packages
looking for arrays that can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> m.to_shapefile('model.shp')
"""
warnings.warn("to_shapefile() is deprecated. use .export()")
self.export(filename, package_names=package_names)
return
def run_model(
exe_name,
namefile,
model_ws="./",
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
use_async=False,
cargs=None,
):
"""
This function will run the model using subprocess.Popen and,
optionally, communicate with the model's stdout asynchronously,
reporting progress to the screen with timestamps.
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
Namefile of the model to run, given as the filename only,
without the path. Can be None for programs that do not
require a control file (name file) to be passed as a
command line argument.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
silent : boolean
If True, suppress writing run information to the screen.
(default is False)
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
by the method. (default is False)
normal_msg : str or list
Normal termination message used to determine if the
run terminated normally. More than one message can be provided using
a list. (Default is 'normal termination')
use_async : boolean
Asynchronously read model stdout and report with timestamps.
Useful for models that take a long time to run; not recommended
for models that run very quickly. (default is False)
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
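Examples
--------
>>> # minimal sketch, assuming a MODFLOW executable named "mf2005" is
>>> # on the system path and "model.nam" exists in model_ws
>>> success, buff = run_model("mf2005", "model.nam", model_ws=".",
...                           silent=True, report=True)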
"""
success = False
buff = []
# convert normal_msg to a list of lower case str for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg]
for idx, s in enumerate(normal_msg):
normal_msg[idx] = s.lower()
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
import platform
if platform.system() == "Windows":
if not exe_name.lower().endswith(".exe"):
exe = which(exe_name + ".exe")
elif exe_name.lower().endswith(".exe"):
exe = which(exe_name[:-4])
if exe is None:
raise Exception(
f"The program {exe_name} does not exist or is not executable."
)
else:
if not silent:
print(
f"FloPy is using the following executable to run the model: {exe}"
)
if namefile is not None:
if not os.path.isfile(os.path.join(model_ws, namefile)):
raise Exception(
f"The namefile for this model does not exists: {namefile}"
)
# simple little function for the thread to target
def q_output(output, q):
for line in iter(output.readline, b""):
q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe_name]
if namefile is not None:
argv.append(namefile)
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
if not use_async:
while True:
line = proc.stdout.readline().decode("utf-8")
if line == "" and proc.poll() is not None:
break
if line:
for msg in normal_msg:
if msg in line.lower():
success = True
break
line = line.rstrip("\r\n")
if not silent:
print(line)
if report:
buff.append(line)
else:
break
return success, buff
# some tricks for the async stdout reading
q = Queue.Queue()
thread = threading.Thread(target=q_output, args=(proc.stdout, q))
thread.daemon = True
thread.start()
failed_words = ["fail", "error"]
last = datetime.now()
lastsec = 0.0
while True:
try:
line = q.get_nowait()
except Queue.Empty:
pass
else:
if line == "":
break
line = line.decode().lower().strip()
if line != "":
now = datetime.now()
dt = now - last
tsecs = dt.total_seconds() - lastsec
line = f"(elapsed:{tsecs})-->{line}"
lastsec = tsecs + lastsec
buff.append(line)
if not silent:
print(line)
for fword in failed_words:
if fword in line:
success = False
break
if proc.poll() is not None:
break
proc.wait()
thread.join(timeout=1)
buff.extend(proc.stdout.readlines())
proc.stdout.close()
for line in buff:
for msg in normal_msg:
if msg in line.lower():
print("success")
success = True
break
if pause:
input("Press Enter to continue...")
return success, buff
|
get_package
|
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
|
"""
mbase module
This module contains the base model class from which
all of the other models inherit.
"""
import abc
import os
import shutil
import threading
import warnings
import queue as Queue
from datetime import datetime
from shutil import which
from subprocess import Popen, PIPE, STDOUT
import copy
import numpy as np
from flopy import utils, discretization
from .version import __version__
from .discretization.grid import Grid
## Global variables
# Multiplier for individual array elements in integer and real arrays read by
# MODFLOW's U2DREL, U1DREL and U2DINT.
iconst = 1
# Printout flag. If >= 0 then array values read are printed in listing file.
iprn = -1
# external exceptions for users
class PackageLoadException(Exception):
"""
FloPy package load exception.
"""
def __init__(self, error, location=""):
"""Initialize exception."""
self.message = error
super().__init__(f"{error} ({location})")
class FileDataEntry:
def __init__(self, fname, unit, binflag=False, output=False, package=None):
self.fname = fname
self.unit = unit
self.binflag = binflag
self.output = output
self.package = package
class FileData:
def __init__(self):
self.file_data = []
return
def add_file(self, fname, unit, binflag=False, output=False, package=None):
ipop = []
for idx, file_data in enumerate(self.file_data):
if file_data.fname == fname or file_data.unit == unit:
ipop.append(idx)
self.file_data.append(
FileDataEntry(
fname, unit, binflag=binflag, output=output, package=package
)
)
return
class ModelInterface:
def __init__(self):
self._mg_resync = True
self._modelgrid = None
def update_modelgrid(self):
if self._modelgrid is not None:
self._modelgrid = Grid(
proj4=self._modelgrid.proj4,
xoff=self._modelgrid.xoffset,
yoff=self._modelgrid.yoffset,
angrot=self._modelgrid.angrot,
)
self._mg_resync = True
@property
@abc.abstractmethod
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
@abc.abstractmethod
def packagelist(self):
raise NotImplementedError(
"must define packagelist in child class to use this base class"
)
@property
@abc.abstractmethod
def namefile(self):
raise NotImplementedError(
"must define namefile in child class to use this base class"
)
@property
@abc.abstractmethod
def model_ws(self):
raise NotImplementedError(
"must define model_ws in child class to use this base class"
)
@property
@abc.abstractmethod
def exename(self):
raise NotImplementedError(
"must define exename in child class to use this base class"
)
@property
@abc.abstractmethod
def version(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@property
@abc.abstractmethod
def solver_tols(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@abc.abstractmethod
def export(self, f, **kwargs):
raise NotImplementedError(
"must define export in child class to use this base class"
)
@property
@abc.abstractmethod
def laytyp(self):
raise NotImplementedError(
"must define laytyp in child class to use this base class"
)
@property
@abc.abstractmethod
def hdry(self):
raise NotImplementedError(
"must define hdry in child class to use this base class"
)
@property
@abc.abstractmethod
def hnoflo(self):
raise NotImplementedError(
"must define hnoflo in child class to use this base class"
)
@property
@abc.abstractmethod
def laycbd(self):
raise NotImplementedError(
"must define laycbd in child class to use this base class"
)
@property
@abc.abstractmethod
def verbose(self):
raise NotImplementedError(
"must define verbose in child class to use this base class"
)
@abc.abstractmethod
def check(self, f=None, verbose=True, level=1):
raise NotImplementedError(
"must define check in child class to use this base class"
)
def get_package_list(self, ftype=None):
"""
Get a list of all the package names.
Parameters
----------
ftype : str
Type of package, 'RIV', 'LPF', etc.
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
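Examples
--------
>>> # sketch; "m" is a hypothetical loaded model
>>> m.get_package_list()
['DIS', 'BAS6', 'LPF', 'OC']
>>> m.get_package_list(ftype='lpf')
['LPF']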
"""
val = []
for pp in self.packagelist:
if ftype is None:
val.append(pp.name[0].upper())
elif pp.package_type.lower() == ftype:
val.append(pp.name[0].upper())
return val
def _check(self, chk, level=1):
"""
Check model data for common errors.
Parameters
----------
chk : check object
Model-level check instance to which package-level check
results are appended.
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
chk : check object
The check instance, with package results appended and summarized.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
results = {}
for p in self.packagelist:
if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
results[p.name[0]] = p.check(
f=None,
verbose=False,
level=level - 1,
checktype=chk.__class__,
)
# model level checks
# solver check
if self.version in chk.solver_packages.keys():
solvers = set(chk.solver_packages[self.version]).intersection(
set(self.get_package_list())
)
if not solvers:
chk._add_to_summary(
"Error", desc="\r No solver package", package="model"
)
elif len(list(solvers)) > 1:
for s in solvers:
chk._add_to_summary(
"Error",
desc="\r Multiple solver packages",
package=s,
)
else:
chk.passed.append("Compatible solver package")
# add package check results to model level check summary
for r in results.values():
if (
r is not None and r.summary_array is not None
): # currently SFR doesn't have one
chk.summary_array = np.append(
chk.summary_array, r.summary_array
).view(np.recarray)
chk.passed += [
f"{r.package.name[0]} package: {psd}" for psd in r.passed
]
chk.summarize()
return chk
class BaseModel(ModelInterface):
"""
MODFLOW-based models base class.
Parameters
----------
modelname : str, default "modflowtest"
Name of the model, which is also used for model file names.
namefile_ext : str, default "nam"
Name file extension, without "."
exe_name : str, default "mf2k.exe"
Name of the modflow executable.
model_ws : str, optional
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
structured : bool, default True
Specify if model grid is structured (default) or unstructured.
verbose : bool, default False
Print additional information to the screen.
**kwargs : dict, optional
Used to define: ``xll``/``yll`` for the x- and y-coordinates of
the lower-left corner of the grid, ``xul``/``yul`` for the
x- and y-coordinates of the upper-left corner of the grid
(deprecated), ``rotation`` for the grid rotation (default 0.0),
``proj4_str`` for a PROJ string, and ``start_datetime`` for
model start date (default "1-1-1970").
"""
def __init__(
self,
modelname="modflowtest",
namefile_ext="nam",
exe_name="mf2k.exe",
model_ws=None,
structured=True,
verbose=False,
**kwargs,
):
"""Initialize BaseModel."""
super().__init__()
self.__name = modelname
self.namefile_ext = namefile_ext or ""
self._namefile = self.__name + "." + self.namefile_ext
self._packagelist = []
self.heading = ""
self.exe_name = exe_name
self._verbose = verbose
self.external_path = None
self.external_extension = "ref"
if model_ws is None:
model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
except:
print(
f"\n{model_ws} not valid, "
f"workspace-folder was changed to {os.getcwd()}\n"
)
model_ws = os.getcwd()
self._model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ""
# check for reference info in kwargs
# we are just carrying these until a dis package is added
xll = kwargs.pop("xll", None)
yll = kwargs.pop("yll", None)
self._xul = kwargs.pop("xul", None)
self._yul = kwargs.pop("yul", None)
self._rotation = kwargs.pop("rotation", 0.0)
self._proj4_str = kwargs.pop("proj4_str", None)
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
# build model discretization objects
self._modelgrid = Grid(
proj4=self._proj4_str,
xoff=xll,
yoff=yll,
angrot=self._rotation,
)
self._modeltime = None
# Model file information
self.__onunit__ = 10
# external option stuff
self.array_free_format = True
self.free_format_input = True
self.parameter_load = False
self.array_format = None
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external_output = []
self.package_units = []
self._next_ext_unit = None
# output files
self.output_fnames = []
self.output_units = []
self.output_binflag = []
self.output_packages = []
return
@property
def modeltime(self):
raise NotImplementedError(
"must define modeltime in child class to use this base class"
)
@property
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
def packagelist(self):
return self._packagelist
@packagelist.setter
def packagelist(self, packagelist):
self._packagelist = packagelist
@property
def namefile(self):
return self._namefile
@namefile.setter
def namefile(self, namefile):
self._namefile = namefile
@property
def model_ws(self):
return self._model_ws
@model_ws.setter
def model_ws(self, model_ws):
self._model_ws = model_ws
@property
def exename(self):
return self._exename
@exename.setter
def exename(self, exename):
self._exename = exename
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, verbose):
self._verbose = verbose
@property
def laytyp(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").laytyp.array
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").laycon.array
if self.get_package("UPW") is not None:
return self.get_package("UPW").laytyp.array
return None
@property
def hdry(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").hdry
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").hdry
if self.get_package("UPW") is not None:
return self.get_package("UPW").hdry
return None
@property
def hnoflo(self):
try:
bas6 = self.get_package("BAS6")
return bas6.hnoflo
except AttributeError:
return None
@property
def laycbd(self):
try:
dis = self.get_package("DIS")
return dis.laycbd.array
except AttributeError:
return None
# we don't need these - no need for controlled access to array_free_format
# def set_free_format(self, value=True):
# """
# Set the free format flag for the model instance
#
# Parameters
# ----------
# value : bool
# Boolean value to set free format flag for model. (default is True)
#
# Returns
# -------
#
# """
# if not isinstance(value, bool):
# print('Error: set_free_format passed value must be a boolean')
# return False
# self.array_free_format = value
#
# def get_free_format(self):
# """
# Return the free format flag for the model
#
# Returns
# -------
# out : bool
# Free format flag for the model
#
# """
# return self.array_free_format
def next_unit(self, i=None):
if i is not None:
self.__onunit__ = i - 1
else:
self.__onunit__ += 1
return self.__onunit__
def next_ext_unit(self):
"""
Function to encapsulate next_ext_unit attribute
"""
next_unit = self._next_ext_unit + 1
self._next_ext_unit += 1
return next_unit
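# A small sketch of the two unit-number counters above: next_unit()
# advances the model's internal counter (seeded at 10 in __init__),
# while next_ext_unit() assumes _next_ext_unit has been seeded
# elsewhere (e.g. during model load) before it is called.
# >>> m = flopy.modflow.Modflow()   # hypothetical default model
# >>> m.next_unit(), m.next_unit()
# (11, 12)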
def export(self, f, **kwargs):
"""
Method to export a model to netcdf or shapefile based on the
extension of the file name (.shp for shapefile, .nc for netcdf)
Parameters
----------
f : str
filename
kwargs : keyword arguments
modelgrid : flopy.discretization.Grid instance
user supplied modelgrid which can be used for exporting
in lieu of the modelgrid associated with the model object
Returns
-------
None or Netcdf object
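Examples
--------
>>> # sketch; "m" is a hypothetical model instance and the export
>>> # format is inferred from the file extension
>>> m.export('model.shp')  # shapefile
>>> m.export('model.nc')   # netcdf (requires the netCDF4 package)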
"""
from .export import utils
return utils.model_export(f, self, **kwargs)
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
"""
for idx, u in enumerate(p.unit_number):
if u != 0:
if u in self.package_units or u in self.external_units:
try:
pn = p.name[idx]
except:
pn = p.name
if self.verbose:
print(
f"\nWARNING:\n unit {u} of package {pn} already in use."
)
self.package_units.append(u)
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif isinstance(p, type(pp)):
if self.verbose:
print(
"\nWARNING:\n Two packages of the same type, "
f"Replacing existing '{p.name[0]}' package."
)
self.packagelist[i] = p
return
if self.verbose:
print("adding Package: ", p.name[0])
self.packagelist.append(p)
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
"""
for i, pp in enumerate(self.packagelist):
if pname.upper() in pp.name:
if self.verbose:
print("removing Package: ", pp.name)
# Remove the package object from the model's packagelist
p = self.packagelist.pop(i)
# Remove the package unit number from the list of package
# units stored with the model
for iu in p.unit_number:
if iu in self.package_units:
self.package_units.remove(iu)
return
raise StopIteration(
"Package name " + pname + " not found in Package list"
)
def __getattr__(self, item):
"""
__getattr__ - syntactic sugar
Parameters
----------
item : str
3 character package name (case insensitive) or "sr" to access
the SpatialReference instance of the ModflowDis object
Returns
-------
sr : SpatialReference instance
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
Note
----
if self.dis is not None, then the spatial reference instance is updated
using self.dis.delr, self.dis.delc, and self.dis.lenuni before being
returned
"""
if item == "output_packages" or not hasattr(self, "output_packages"):
raise AttributeError(item)
if item == "tr":
if self.dis is not None:
return self.dis.tr
else:
return None
if item == "nper":
if self.dis is not None:
return self.dis.nper
else:
return 0
if item == "start_datetime":
if self.dis is not None:
return self.dis.start_datetime
else:
return None
# return self.get_package(item)
# to avoid infinite recursion
if item == "_packagelist" or item == "packagelist":
raise AttributeError(item)
pckg = self.get_package(item)
if pckg is not None or item in self.mfnam_packages:
return pckg
if item == "modelgrid":
return
raise AttributeError(item)
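# Attribute-access sketch for __getattr__ above: "m.lpf" resolves to
# m.get_package("lpf"), "m.nper" proxies the dis package, and names
# that are neither special-cased nor valid package names raise
# AttributeError.
# >>> m.lpf is m.get_package("lpf")   # "m" is a hypothetical model
# True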
def get_ext_dict_attr(
self, ext_unit_dict=None, unit=None, filetype=None, pop_key=True
):
iu = None
fname = None
if ext_unit_dict is not None:
for key, value in ext_unit_dict.items():
if key == unit:
iu = key
fname = os.path.basename(value.filename)
break
elif value.filetype == filetype:
iu = key
fname = os.path.basename(value.filename)
if pop_key:
self.add_pop_key_list(iu)
break
return iu, fname
def _output_msg(self, i, add=True):
if add:
txt1 = "Adding"
txt2 = "to"
else:
txt1 = "Removing"
txt2 = "from"
print(
f"{txt1} {self.output_fnames[i]} (unit={self.output_units[i]}) "
f"{txt2} the output list."
)
def add_output_file(
self, unit, fname=None, extension="cbc", binflag=True, package=None
):
"""
Add an ascii or binary output file for a package
Parameters
----------
unit : int
unit number of external array
fname : str
filename of external array. (default is None)
extension : str
extension to use for the cell-by-cell file. Only used if fname
is None. (default is cbc)
binflag : bool
boolean flag indicating if the output file is a binary file.
Default is True
package : str
string that defines the package the output file is attached to.
Default is None
"""
add_cbc = False
if unit > 0:
add_cbc = True
# determine if the file is in external_units
if abs(unit) in self.external_units:
idx = self.external_units.index(abs(unit))
if fname is None:
fname = os.path.basename(self.external_fnames[idx])
binflag = self.external_binflag[idx]
self.remove_external(unit=abs(unit))
# determine if the unit exists in the output data
if abs(unit) in self.output_units:
add_cbc = False
idx = self.output_units.index(abs(unit))
# determine if binflag has changed
if binflag is not self.output_binflag[idx]:
add_cbc = True
if add_cbc:
self.remove_output(unit=abs(unit))
else:
if package is not None:
self.output_packages[idx].append(package)
if add_cbc:
if fname is None:
fname = f"{self.name}.{extension}"
# check if this file name exists for a different unit number
if fname in self.output_fnames:
idx = self.output_fnames.index(fname)
iut = self.output_units[idx]
if iut != unit:
# include unit number in fname if package has
# not been passed
if package is None:
fname = f"{self.name}.{unit}.{extension}"
# include package name in fname
else:
fname = f"{self.name}.{package}.{extension}"
else:
fname = os.path.basename(fname)
self.add_output(fname, unit, binflag=binflag, package=package)
return
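# Bookkeeping sketch for add_output_file() above: a positive unit adds
# (or, when its binflag changed, replaces) an output entry; a negative
# unit only updates an existing entry; and a unit already registered as
# an external file is first removed from the external lists, carrying
# its filename and binflag over to the output list.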
def add_output(self, fname, unit, binflag=False, package=None):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.output_fnames:
if self.verbose:
print(
"BaseModel.add_output() warning: "
f"replacing existing filename {fname}"
)
idx = self.output_fnames.index(fname)
if self.verbose:
self._output_msg(idx, add=False)
self.output_fnames.pop(idx)
self.output_units.pop(idx)
self.output_binflag.pop(idx)
self.output_packages.pop(idx)
self.output_fnames.append(fname)
self.output_units.append(unit)
self.output_binflag.append(binflag)
if package is not None:
self.output_packages.append([package])
else:
self.output_packages.append([])
if self.verbose:
self._output_msg(-1, add=True)
return
def remove_output(self, fname=None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
else:
msg = " either fname or unit must be passed to remove_output()"
raise Exception(msg)
return
def get_output(self, fname=None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
return self.output_units[i]
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
return self.output_fnames[i]
return None
else:
msg = " either fname or unit must be passed to get_output()"
raise Exception(msg)
return
def set_output_attribute(self, fname=None, unit=None, attr=None):
"""
Set a variable in an output file from the model by specifying either
the file name or the unit number and a dictionary with attributes
to change.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
msg = (
" either fname or unit must be passed "
"to set_output_attribute()"
)
raise Exception(msg)
if attr is not None:
if idx is not None:
for key, value in attr.items():
if key == "binflag":
self.output_binflag[idx] = value
elif key == "fname":
self.output_fnames[idx] = value
elif key == "unit":
self.output_units[idx] = value
return
def get_output_attribute(self, fname=None, unit=None, attr=None):
"""
Get an attribute for an output file from the model by specifying either
the file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
" either fname or unit must be passed "
"to set_output_attribute()"
)
v = None
if attr is not None:
if idx is not None:
if attr == "binflag":
v = self.output_binflag[idx]
elif attr == "fname":
v = self.output_fnames[idx]
elif attr == "unit":
v = self.output_units[idx]
return v
def add_external(self, fname, unit, binflag=False, output=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.external_fnames:
if self.verbose:
print(
"BaseModel.add_external() warning: "
f"replacing existing filename {fname}"
)
idx = self.external_fnames.index(fname)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
if unit in self.external_units:
if self.verbose:
msg = f"BaseModel.add_external() warning: replacing existing unit {unit}"
print(msg)
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
self.external_output.append(output)
return
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
"""
plist = []
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
plist.append(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
plist.append(i)
else:
msg = " either fname or unit must be passed to remove_external()"
raise Exception(msg)
# remove external file
j = 0
for i in plist:
ipos = i - j
self.external_fnames.pop(ipos)
self.external_units.pop(ipos)
self.external_binflag.pop(ipos)
self.external_output.pop(ipos)
j += 1
return
def add_existing_package(
self, filename, ptype=None, copy_to_model_ws=True
):
"""
Add an existing package to a model instance.
Parameters
----------
filename : str
the name of the file to add as a package
ptype : optional
the model package type (e.g. "lpf", "wel", etc). If None,
then the file extension of the filename arg is used
copy_to_model_ws : bool
flag to copy the package file into the model_ws directory.
Returns
-------
None
"""
if ptype is None:
ptype = filename.split(".")[-1]
ptype = str(ptype).upper()
# for pak in self.packagelist:
# if ptype in pak.name:
# print("BaseModel.add_existing_package() warning: " +\
# "replacing existing package {0}".format(ptype))
class Obj:
pass
fake_package = Obj()
fake_package.write_file = lambda: None
fake_package.name = [ptype]
fake_package.extension = [filename.split(".")[-1]]
fake_package.unit_number = [self.next_ext_unit()]
if copy_to_model_ws:
base_filename = os.path.split(filename)[-1]
fake_package.file_name = [base_filename]
shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
else:
fake_package.file_name = [filename]
fake_package.allowDuplicates = True
self.add_package(fake_package)
def get_name_file_entries(self):
"""
Get a string representation of the name file.
Parameters
----------
"""
lines = []
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s = f"{p.name[i]:14s} {p.unit_number[i]:5d} {p.file_name[i]}"
lines.append(s)
return "\n".join(lines) + "\n"
def has_package(self, name):
"""
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
True if package name exists, otherwise False if not found.
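Examples
--------
>>> # sketch; "m" is a hypothetical model and lookup ignores case
>>> m.has_package('dis')
True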
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for p in self.packagelist:
for pn in p.name:
if pn.upper() == name:
return True
return False
# MASKED: get_package function (lines 1136-1157)
def set_version(self, version):
self.version = version.lower()
# check that this is a valid model version
if self.version not in list(self.version_types.keys()):
err = (
f"Error: Unsupported model version ({self.version}). "
"Valid model versions are:"
)
for v in list(self.version_types.keys()):
err += f" {v}"
raise Exception(err)
# set namefile heading
self.heading = (
f"# Name file for {self.version_types[self.version]}, "
f"generated by Flopy version {__version__}."
)
# set heading for each package
for p in self.get_package_list():
pak = self.get_package(p)
if hasattr(pak, "heading"):
pak._generate_heading()
return None
def change_model_ws(self, new_pth=None, reset_external=False):
"""
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
the present working directory).
Returns
-------
None
"""
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
print(f"\ncreating model workspace...\n {new_pth}")
os.makedirs(new_pth)
except:
raise OSError(f"{new_pth} is not a valid workspace folder")
# line = '\n{} not valid, workspace-folder '.format(new_pth) + \
# 'was changed to {}\n'.format(os.getcwd())
# print(line)
# new_pth = os.getcwd()
# --reset the model workspace
old_pth = self._model_ws
self._model_ws = new_pth
if self.verbose:
print(f"\nchanging model workspace...\n {new_pth}")
# reset the paths for each package
for pp in self.packagelist:
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
# create the external path (if needed)
if (
hasattr(self, "external_path")
and self.external_path is not None
and not os.path.exists(
os.path.join(self._model_ws, self.external_path)
)
):
pth = os.path.join(self._model_ws, self.external_path)
os.makedirs(pth)
if reset_external:
self._reset_external(pth, old_pth)
elif reset_external:
self._reset_external(self._model_ws, old_pth)
return None
def _reset_external(self, pth, old_pth):
new_ext_fnames = []
for ext_file, output in zip(
self.external_fnames, self.external_output
):
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
# this is a wicked mess
if output:
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
new_ext_file = ext_file
else:
# fpth = os.path.abspath(os.path.join(old_pth, ext_file))
# new_ext_file = os.path.relpath(fpth, os.path.abspath(pth))
fdir = os.path.dirname(ext_file)
if fdir == "":
fpth = os.path.abspath(os.path.join(old_pth, ext_file))
else:
fpth = ext_file
ao = os.path.abspath(os.path.dirname(fpth))
ep = os.path.abspath(pth)
relp = os.path.relpath(ao, ep)
new_ext_file = os.path.join(relp, os.path.basename(ext_file))
new_ext_fnames.append(new_ext_file)
self.external_fnames = new_ext_fnames
@property
def model_ws(self):
return copy.deepcopy(self._model_ws)
def _set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = str(value)
self.namefile = self.__name + "." + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + "." + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
def __setattr__(self, key, value):
if key == "free_format_input":
# if self.bas6 is not None:
# self.bas6.ifrefm = value
super().__setattr__(key, value)
elif key == "name":
self._set_name(value)
elif key == "model_ws":
self.change_model_ws(value)
elif key == "sr" and value.__class__.__name__ == "SpatialReference":
warnings.warn(
"SpatialReference has been deprecated.",
category=DeprecationWarning,
)
if self.dis is not None:
self.dis.sr = value
else:
raise Exception(
"cannot set SpatialReference - ModflowDis not found"
)
elif key == "tr":
assert isinstance(
value, discretization.reference.TemporalReference
)
if self.dis is not None:
self.dis.tr = value
else:
raise Exception(
"cannot set TemporalReference - ModflowDis not found"
)
elif key == "start_datetime":
if self.dis is not None:
self.dis.start_datetime = value
self.tr.start_datetime = value
else:
raise Exception(
"cannot set start_datetime - ModflowDis not found"
)
else:
super().__setattr__(key, value)
def run_model(
self,
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
):
"""
This method will run the model using subprocess.Popen.
Parameters
----------
silent : boolean
If True, suppress writing run information to the screen.
(default is False)
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
by the method. (default is False)
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
return run_model(
self.exe_name,
self.namefile,
model_ws=self.model_ws,
silent=silent,
pause=pause,
report=report,
normal_msg=normal_msg,
)
def load_results(self):
print("load_results not implemented")
return None
def write_input(self, SelPackList=False, check=False):
"""
Write the input.
Parameters
----------
SelPackList : False or list of packages
"""
if check:
# run check prior to writing input
self.check(f=f"{self.name}.chk", verbose=self.verbose, level=1)
# reset the model to free_format if parameter substitution was
# performed on a model load
if self.parameter_load and not self.free_format_input:
if self.verbose:
print(
"\nResetting free_format_input to True to "
"preserve the precision of the parameter data."
)
self.free_format_input = True
if self.verbose:
print("\nWriting packages:")
if SelPackList == False:
for p in self.packagelist:
if self.verbose:
print(" Package: ", p.name[0])
# prevent individual package checks from running after
# model-level package check above
# otherwise checks are run twice
# or the model level check procedure would have to be split up
# or each package would need a check argument,
# or default for package level check would have to be False
try:
p.write_file(check=False)
except TypeError:
p.write_file()
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(" Package: ", p.name[0])
try:
p.write_file(check=False)
except TypeError:
p.write_file()
break
if self.verbose:
print(" ")
# write name file
self.write_name_file()
# os.chdir(org_dir)
return
def write_name_file(self):
"""
Every model needs its own write_name_file function
"""
raise Exception(
"IMPLEMENTATION ERROR: writenamefile must be overloaded"
)
def set_model_units(self):
"""
Every model needs its own set_model_units method
"""
raise Exception(
"IMPLEMENTATION ERROR: set_model_units must be overloaded"
)
@property
def name(self):
"""
Get model name
Returns
-------
name : str
name of model
"""
return copy.deepcopy(self.__name)
def add_pop_key_list(self, key):
"""
Add an external file unit number to a list that will be used to remove
model output (typically binary) files from ext_unit_dict.
Parameters
----------
key : int
file unit number
Returns
-------
Examples
--------
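>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> # sketch: mark unit 53 so the (typically binary) output file on
>>> # that unit is dropped from ext_unit_dict when a name file is parsed
>>> m.add_pop_key_list(53)
>>> m.pop_key_list
[53]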
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
chk = utils.check(self, f=f, verbose=verbose, level=level)
# check for unit number conflicts
package_units = {}
duplicate_units = {}
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] != 0:
if p.unit_number[i] in package_units.values():
duplicate_units[p.name[i]] = p.unit_number[i]
otherpackage = [
k
for k, v in package_units.items()
if v == p.unit_number[i]
][0]
duplicate_units[otherpackage] = p.unit_number[i]
if len(duplicate_units) > 0:
for k, v in duplicate_units.items():
chk._add_to_summary(
"Error", package=k, value=v, desc="unit number conflict"
)
else:
chk.passed.append("Unit number conflicts")
return self._check(chk, level)
def plot(self, SelPackList=None, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
model input data
Parameters
----------
SelPackList : bool or list
List of packages to plot. If SelPackList=None all packages
are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if filename_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
MODFLOW zero-based layer number to return. If None, then all
layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return.
(default is zero)
key : str
MfList dictionary key. (default is None)
Returns
-------
axes : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib axes objects is returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_model_helper(
self, SelPackList=SelPackList, **kwargs
)
return axes
def to_shapefile(self, filename, package_names=None, **kwargs):
"""
Wrapper function for writing a shapefile for the model grid. If
package_names is not None, then search through the requested packages
looking for arrays that can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> m.to_shapefile('model.shp')
"""
warnings.warn("to_shapefile() is deprecated. use .export()")
self.export(filename, package_names=package_names)
return
def run_model(
exe_name,
namefile,
model_ws="./",
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
use_async=False,
cargs=None,
):
"""
This function will run the model using subprocess.Popen and,
optionally, communicate with the model's stdout asynchronously,
reporting progress to the screen with timestamps.
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
Namefile of the model to run, given as the filename only,
without the path. Can be None for programs that do not
require a control file (name file) to be passed as a
command line argument.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
silent : boolean
If True, suppress writing run information to the screen.
(default is False)
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
by the method. (default is False)
normal_msg : str or list
Normal termination message used to determine if the
run terminated normally. More than one message can be provided using
a list. (Default is 'normal termination')
use_async : boolean
Asynchronously read model stdout and report with timestamps.
Useful for models that take a long time to run; not recommended
for models that run very quickly. (default is False)
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
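Examples
--------
>>> # minimal sketch, assuming a MODFLOW executable named "mf2005" is
>>> # on the system path and "model.nam" exists in model_ws
>>> success, buff = run_model("mf2005", "model.nam", model_ws=".",
...                           silent=True, report=True)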
"""
success = False
buff = []
# convert normal_msg to a list of lower case str for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg]
for idx, s in enumerate(normal_msg):
normal_msg[idx] = s.lower()
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
import platform
if platform.system() == "Windows":
if not exe_name.lower().endswith(".exe"):
exe = which(exe_name + ".exe")
elif exe_name.lower().endswith(".exe"):
exe = which(exe_name[:-4])
if exe is None:
raise Exception(
f"The program {exe_name} does not exist or is not executable."
)
else:
if not silent:
print(
f"FloPy is using the following executable to run the model: {exe}"
)
if namefile is not None:
if not os.path.isfile(os.path.join(model_ws, namefile)):
raise Exception(
f"The namefile for this model does not exists: {namefile}"
)
# simple little function for the thread to target
def q_output(output, q):
for line in iter(output.readline, b""):
q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe_name]
if namefile is not None:
argv.append(namefile)
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
if not use_async:
while True:
line = proc.stdout.readline().decode("utf-8")
if line == "" and proc.poll() is not None:
break
if line:
for msg in normal_msg:
if msg in line.lower():
success = True
break
line = line.rstrip("\r\n")
if not silent:
print(line)
if report:
buff.append(line)
else:
break
return success, buff
# some tricks for the async stdout reading
q = Queue.Queue()
thread = threading.Thread(target=q_output, args=(proc.stdout, q))
thread.daemon = True
thread.start()
failed_words = ["fail", "error"]
last = datetime.now()
lastsec = 0.0
while True:
try:
line = q.get_nowait()
except Queue.Empty:
pass
else:
if line == "":
break
line = line.decode().lower().strip()
if line != "":
now = datetime.now()
dt = now - last
tsecs = dt.total_seconds() - lastsec
line = f"(elapsed:{tsecs})-->{line}"
lastsec = tsecs + lastsec
buff.append(line)
if not silent:
print(line)
for fword in failed_words:
if fword in line:
success = False
break
if proc.poll() is not None:
break
proc.wait()
thread.join(timeout=1)
buff.extend(proc.stdout.readlines())
proc.stdout.close()
for line in buff:
for msg in normal_msg:
if msg in line.lower():
print("success")
success = True
break
if pause:
input("Press Enter to continue...")
return success, buff
|
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for pp in self.packagelist:
if pp.name[0].upper() == name:
return pp
return None
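# Usage sketch (hypothetical model instance):
# >>> m = flopy.modflow.Modflow.load('model.nam')
# >>> lpf = m.get_package('lpf')   # case-insensitive lookup
# >>> lpf is None                  # None is returned when no match
# False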
| 1136
| 1157
|
"""
mbase module
This module contains the base model class from which
all of the other models inherit.
"""
import abc
import os
import shutil
import threading
import warnings
import queue as Queue
from datetime import datetime
from shutil import which
from subprocess import Popen, PIPE, STDOUT
import copy
import numpy as np
from flopy import utils, discretization
from .version import __version__
from .discretization.grid import Grid
## Global variables
# Multiplier for individual array elements in integer and real arrays read by
# MODFLOW's U2DREL, U1DREL and U2DINT.
iconst = 1
# Printout flag. If >= 0 then array values read are printed in listing file.
iprn = -1
# external exceptions for users
class PackageLoadException(Exception):
"""
FloPy package load exception.
"""
def __init__(self, error, location=""):
"""Initialize exception."""
self.message = error
super().__init__(f"{error} ({location})")
class FileDataEntry:
def __init__(self, fname, unit, binflag=False, output=False, package=None):
self.fname = fname
self.unit = unit
self.binflag = binflag
self.output = output
self.package = package
class FileData:
def __init__(self):
self.file_data = []
return
def add_file(self, fname, unit, binflag=False, output=False, package=None):
ipop = []
for idx, file_data in enumerate(self.file_data):
if file_data.fname == fname or file_data.unit == unit:
ipop.append(idx)
self.file_data.append(
FileDataEntry(
fname, unit, binflag=binflag, output=output, package=package
)
)
return
class ModelInterface:
def __init__(self):
self._mg_resync = True
self._modelgrid = None
def update_modelgrid(self):
if self._modelgrid is not None:
self._modelgrid = Grid(
proj4=self._modelgrid.proj4,
xoff=self._modelgrid.xoffset,
yoff=self._modelgrid.yoffset,
angrot=self._modelgrid.angrot,
)
self._mg_resync = True
@property
@abc.abstractmethod
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
@abc.abstractmethod
def packagelist(self):
raise NotImplementedError(
"must define packagelist in child class to use this base class"
)
@property
@abc.abstractmethod
def namefile(self):
raise NotImplementedError(
"must define namefile in child class to use this base class"
)
@property
@abc.abstractmethod
def model_ws(self):
raise NotImplementedError(
"must define model_ws in child class to use this base class"
)
@property
@abc.abstractmethod
def exename(self):
raise NotImplementedError(
"must define exename in child class to use this base class"
)
@property
@abc.abstractmethod
def version(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@property
@abc.abstractmethod
def solver_tols(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@abc.abstractmethod
def export(self, f, **kwargs):
raise NotImplementedError(
"must define export in child class to use this base class"
)
@property
@abc.abstractmethod
def laytyp(self):
raise NotImplementedError(
"must define laytyp in child class to use this base class"
)
@property
@abc.abstractmethod
def hdry(self):
raise NotImplementedError(
"must define hdry in child class to use this base class"
)
@property
@abc.abstractmethod
def hnoflo(self):
raise NotImplementedError(
"must define hnoflo in child class to use this base class"
)
@property
@abc.abstractmethod
def laycbd(self):
raise NotImplementedError(
"must define laycbd in child class to use this base class"
)
@property
@abc.abstractmethod
def verbose(self):
raise NotImplementedError(
"must define verbose in child class to use this base class"
)
@abc.abstractmethod
def check(self, f=None, verbose=True, level=1):
raise NotImplementedError(
"must define check in child class to use this base class"
)
def get_package_list(self, ftype=None):
"""
Get a list of all the package names.
Parameters
----------
ftype : str
Type of package, 'RIV', 'LPF', etc.
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
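Examples
--------
>>> # sketch; "m" is a hypothetical loaded model
>>> m.get_package_list()
['DIS', 'BAS6', 'LPF', 'OC']
>>> m.get_package_list(ftype='lpf')
['LPF']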
"""
val = []
for pp in self.packagelist:
if ftype is None:
val.append(pp.name[0].upper())
elif pp.package_type.lower() == ftype:
val.append(pp.name[0].upper())
return val
def _check(self, chk, level=1):
"""
Check model data for common errors.
Parameters
----------
chk : check object
Model-level check instance to which package-level check
results are appended.
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
chk : check object
The check instance, with package results appended and summarized.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
results = {}
for p in self.packagelist:
if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
results[p.name[0]] = p.check(
f=None,
verbose=False,
level=level - 1,
checktype=chk.__class__,
)
# model level checks
# solver check
if self.version in chk.solver_packages.keys():
solvers = set(chk.solver_packages[self.version]).intersection(
set(self.get_package_list())
)
if not solvers:
chk._add_to_summary(
"Error", desc="\r No solver package", package="model"
)
elif len(list(solvers)) > 1:
for s in solvers:
chk._add_to_summary(
"Error",
desc="\r Multiple solver packages",
package=s,
)
else:
chk.passed.append("Compatible solver package")
# add package check results to model level check summary
for r in results.values():
if (
r is not None and r.summary_array is not None
): # currently SFR doesn't have one
chk.summary_array = np.append(
chk.summary_array, r.summary_array
).view(np.recarray)
chk.passed += [
f"{r.package.name[0]} package: {psd}" for psd in r.passed
]
chk.summarize()
return chk
class BaseModel(ModelInterface):
"""
MODFLOW-based models base class.
Parameters
----------
modelname : str, default "modflowtest"
Name of the model, which is also used for model file names.
namefile_ext : str, default "nam"
Name file extension, without "."
exe_name : str, default "mf2k.exe"
Name of the modflow executable.
model_ws : str, optional
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
structured : bool, default True
Specify if model grid is structured (default) or unstructured.
verbose : bool, default False
Print additional information to the screen.
**kwargs : dict, optional
Used to define: ``xll``/``yll`` for the x- and y-coordinates of
the lower-left corner of the grid, ``xul``/``yul`` for the
x- and y-coordinates of the upper-left corner of the grid
(deprecated), ``rotation`` for the grid rotation (default 0.0),
``proj4_str`` for a PROJ string, and ``start_datetime`` for
model start date (default "1-1-1970").
"""
def __init__(
self,
modelname="modflowtest",
namefile_ext="nam",
exe_name="mf2k.exe",
model_ws=None,
structured=True,
verbose=False,
**kwargs,
):
"""Initialize BaseModel."""
super().__init__()
self.__name = modelname
self.namefile_ext = namefile_ext or ""
self._namefile = self.__name + "." + self.namefile_ext
self._packagelist = []
self.heading = ""
self.exe_name = exe_name
self._verbose = verbose
self.external_path = None
self.external_extension = "ref"
if model_ws is None:
model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
except:
print(
f"\n{model_ws} not valid, "
f"workspace-folder was changed to {os.getcwd()}\n"
)
model_ws = os.getcwd()
self._model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ""
# check for reference info in kwargs
# we are just carrying these until a dis package is added
xll = kwargs.pop("xll", None)
yll = kwargs.pop("yll", None)
self._xul = kwargs.pop("xul", None)
self._yul = kwargs.pop("yul", None)
self._rotation = kwargs.pop("rotation", 0.0)
self._proj4_str = kwargs.pop("proj4_str", None)
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
# build model discretization objects
self._modelgrid = Grid(
proj4=self._proj4_str,
xoff=xll,
yoff=yll,
angrot=self._rotation,
)
self._modeltime = None
# Model file information
self.__onunit__ = 10
# external option stuff
self.array_free_format = True
self.free_format_input = True
self.parameter_load = False
self.array_format = None
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external_output = []
self.package_units = []
self._next_ext_unit = None
# output files
self.output_fnames = []
self.output_units = []
self.output_binflag = []
self.output_packages = []
return
@property
def modeltime(self):
raise NotImplementedError(
"must define modeltime in child class to use this base class"
)
@property
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
def packagelist(self):
return self._packagelist
@packagelist.setter
def packagelist(self, packagelist):
self._packagelist = packagelist
@property
def namefile(self):
return self._namefile
@namefile.setter
def namefile(self, namefile):
self._namefile = namefile
@property
def model_ws(self):
return self._model_ws
@model_ws.setter
def model_ws(self, model_ws):
self._model_ws = model_ws
@property
def exename(self):
return self._exename
@exename.setter
def exename(self, exename):
self._exename = exename
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, verbose):
self._verbose = verbose
@property
def laytyp(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").laytyp.array
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").laycon.array
if self.get_package("UPW") is not None:
return self.get_package("UPW").laytyp.array
return None
@property
def hdry(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").hdry
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").hdry
if self.get_package("UPW") is not None:
return self.get_package("UPW").hdry
return None
@property
def hnoflo(self):
try:
bas6 = self.get_package("BAS6")
return bas6.hnoflo
except AttributeError:
return None
@property
def laycbd(self):
try:
dis = self.get_package("DIS")
return dis.laycbd.array
except AttributeError:
return None
# we don't need these - no need for controlled access to array_free_format
# def set_free_format(self, value=True):
# """
# Set the free format flag for the model instance
#
# Parameters
# ----------
# value : bool
# Boolean value to set free format flag for model. (default is True)
#
# Returns
# -------
#
# """
# if not isinstance(value, bool):
# print('Error: set_free_format passed value must be a boolean')
# return False
# self.array_free_format = value
#
# def get_free_format(self):
# """
# Return the free format flag for the model
#
# Returns
# -------
# out : bool
# Free format flag for the model
#
# """
# return self.array_free_format
def next_unit(self, i=None):
if i is not None:
self.__onunit__ = i - 1
else:
self.__onunit__ += 1
return self.__onunit__
def next_ext_unit(self):
"""
Function to encapsulate next_ext_unit attribute
"""
next_unit = self._next_ext_unit + 1
self._next_ext_unit += 1
return next_unit
def export(self, f, **kwargs):
"""
Method to export a model to netcdf or shapefile based on the
extension of the file name (.shp for shapefile, .nc for netcdf)
Parameters
----------
f : str
filename
kwargs : keyword arguments
modelgrid : flopy.discretization.Grid instance
user supplied modelgrid which can be used for exporting
in lieu of the modelgrid associated with the model object
Returns
-------
None or Netcdf object
"""
from .export import utils
return utils.model_export(f, self, **kwargs)
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
"""
for idx, u in enumerate(p.unit_number):
if u != 0:
if u in self.package_units or u in self.external_units:
try:
pn = p.name[idx]
except:
pn = p.name
if self.verbose:
print(
f"\nWARNING:\n unit {u} of package {pn} already in use."
)
self.package_units.append(u)
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif isinstance(p, type(pp)):
if self.verbose:
print(
"\nWARNING:\n Two packages of the same type, "
f"Replacing existing '{p.name[0]}' package."
)
self.packagelist[i] = p
return
if self.verbose:
print("adding Package: ", p.name[0])
self.packagelist.append(p)
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
"""
for i, pp in enumerate(self.packagelist):
if pname.upper() in pp.name:
if self.verbose:
print("removing Package: ", pp.name)
# Remove the package object from the model's packagelist
p = self.packagelist.pop(i)
# Remove the package unit number from the list of package
# units stored with the model
for iu in p.unit_number:
if iu in self.package_units:
self.package_units.remove(iu)
return
raise StopIteration(
"Package name " + pname + " not found in Package list"
)
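# Hedged sketch of the add/remove package round trip; flopy packages
# register themselves with the model on construction, and the RIV
# record values below are illustrative only:
#
#     import flopy
#     m = flopy.modflow.Modflow(modelname="demo")
#     flopy.modflow.ModflowRiv(m, stress_period_data={0: [[0, 0, 0, 10.0, 100.0, 9.0]]})
#     m.remove_package("RIV")  # raises StopIteration if "RIV" is absent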
def __getattr__(self, item):
"""
__getattr__ - syntactic sugar
Parameters
----------
item : str
3 character package name (case insensitive), or "tr", "nper", or
"start_datetime" to access attributes of the ModflowDis object
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`, or the
requested ModflowDis attribute
Note
----
if self.dis is None, then "tr" and "start_datetime" return None and
"nper" returns 0
"""
if item == "output_packages" or not hasattr(self, "output_packages"):
raise AttributeError(item)
if item == "tr":
if self.dis is not None:
return self.dis.tr
else:
return None
if item == "nper":
if self.dis is not None:
return self.dis.nper
else:
return 0
if item == "start_datetime":
if self.dis is not None:
return self.dis.start_datetime
else:
return None
# return self.get_package(item)
# to avoid infinite recursion
if item == "_packagelist" or item == "packagelist":
raise AttributeError(item)
pckg = self.get_package(item)
if pckg is not None or item in self.mfnam_packages:
return pckg
if item == "modelgrid":
return
raise AttributeError(item)
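# __getattr__ makes packages reachable as attributes; a short sketch,
# assuming DIS and LPF packages are attached to the model:
#
#     m.lpf is m.get_package("LPF")  # True; attribute access is a lookup
#     m.nper            # delegated to m.dis.nper (0 when DIS is missing)
#     m.start_datetime  # delegated to m.dis.start_datetime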
def get_ext_dict_attr(
self, ext_unit_dict=None, unit=None, filetype=None, pop_key=True
):
iu = None
fname = None
if ext_unit_dict is not None:
for key, value in ext_unit_dict.items():
if key == unit:
iu = key
fname = os.path.basename(value.filename)
break
elif value.filetype == filetype:
iu = key
fname = os.path.basename(value.filename)
if pop_key:
self.add_pop_key_list(iu)
break
return iu, fname
def _output_msg(self, i, add=True):
if add:
txt1 = "Adding"
txt2 = "to"
else:
txt1 = "Removing"
txt2 = "from"
print(
f"{txt1} {self.output_fnames[i]} (unit={self.output_units[i]}) "
f"{txt2} the output list."
)
def add_output_file(
self, unit, fname=None, extension="cbc", binflag=True, package=None
):
"""
Add an ASCII or binary output file for a package
Parameters
----------
unit : int
unit number of external array
fname : str
filename of external array. (default is None)
extension : str
extension to use for the cell-by-cell file. Only used if fname
is None. (default is cbc)
binflag : bool
boolean flag indicating if the output file is a binary file.
Default is True
package : str
string that defines the package the output file is attached to.
Default is None
"""
add_cbc = False
if unit > 0:
add_cbc = True
# determine if the file is in external_units
if abs(unit) in self.external_units:
idx = self.external_units.index(abs(unit))
if fname is None:
fname = os.path.basename(self.external_fnames[idx])
binflag = self.external_binflag[idx]
self.remove_external(unit=abs(unit))
# determine if the unit exists in the output data
if abs(unit) in self.output_units:
add_cbc = False
idx = self.output_units.index(abs(unit))
# determine if binflag has changed
if binflag is not self.output_binflag[idx]:
add_cbc = True
if add_cbc:
self.remove_output(unit=abs(unit))
else:
if package is not None:
self.output_packages[idx].append(package)
if add_cbc:
if fname is None:
fname = f"{self.name}.{extension}"
# check if this file name exists for a different unit number
if fname in self.output_fnames:
idx = self.output_fnames.index(fname)
iut = self.output_units[idx]
if iut != unit:
# include unit number in fname if package has
# not been passed
if package is None:
fname = f"{self.name}.{unit}.{extension}"
# include package name in fname
else:
fname = f"{self.name}.{package}.{extension}"
else:
fname = os.path.basename(fname)
self.add_output(fname, unit, binflag=binflag, package=package)
return
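# Illustrative add_output_file() call; the unit number and package name
# are made up for the example:
#
#     m.add_output_file(53, extension="cbc", binflag=True, package="LPF")
#     # registers "<modelname>.cbc" (or a package-qualified name when the
#     # filename is already taken by another unit) as DATA(BINARY)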
def add_output(self, fname, unit, binflag=False, package=None):
"""
Assign an output file so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.output_fnames:
if self.verbose:
print(
"BaseModel.add_output() warning: "
f"replacing existing filename {fname}"
)
idx = self.output_fnames.index(fname)
if self.verbose:
self._output_msg(idx, add=False)
self.output_fnames.pop(idx)
self.output_units.pop(idx)
self.output_binflag.pop(idx)
self.output_packages.pop(idx)
self.output_fnames.append(fname)
self.output_units.append(unit)
self.output_binflag.append(binflag)
if package is not None:
self.output_packages.append([package])
else:
self.output_packages.append([])
if self.verbose:
self._output_msg(-1, add=True)
return
def remove_output(self, fname=None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
else:
msg = " either fname or unit must be passed to remove_output()"
raise Exception(msg)
return
def get_output(self, fname=None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
return self.output_units[i]
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
return self.output_fnames[i]
return None
else:
msg = " either fname or unit must be passed to get_output()"
raise Exception(msg)
return
def set_output_attribute(self, fname=None, unit=None, attr=None):
"""
Set a variable in an output file from the model by specifying either
the file name or the unit number and a dictionary with attributes
to change.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
attr : dict
dictionary of attributes to change; supported keys are
"binflag", "fname", and "unit"
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
msg = (
" either fname or unit must be passed "
"to set_output_attribute()"
)
raise Exception(msg)
if attr is not None:
if idx is not None:
for key, value in attr.items():
if key == "binflag":
self.output_binflag[idx] = value
elif key == "fname":
self.output_fnames[idx] = value
elif key == "unit":
self.output_units[idx] = value
return
def get_output_attribute(self, fname=None, unit=None, attr=None):
"""
Get an attribute for an output file from the model by specifying either
the file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
attr : str
name of the attribute to return; one of "binflag", "fname",
or "unit"
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
" either fname or unit must be passed "
"to set_output_attribute()"
)
v = None
if attr is not None:
if idx is not None:
if attr == "binflag":
v = self.output_binflag[idx]
elif attr == "fname":
v = self.output_fnames[idx]
elif attr == "unit":
v = self.output_units[idx]
return v
def add_external(self, fname, unit, binflag=False, output=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.external_fnames:
if self.verbose:
print(
"BaseModel.add_external() warning: "
f"replacing existing filename {fname}"
)
idx = self.external_fnames.index(fname)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
if unit in self.external_units:
if self.verbose:
msg = f"BaseModel.add_external() warning: replacing existing unit {unit}"
print(msg)
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
self.external_output.append(output)
return
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
"""
plist = []
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
plist.append(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
plist.append(i)
else:
msg = " either fname or unit must be passed to remove_external()"
raise Exception(msg)
# remove external file
j = 0
for i in plist:
ipos = i - j
self.external_fnames.pop(ipos)
self.external_units.pop(ipos)
self.external_binflag.pop(ipos)
self.external_output.pop(ipos)
j += 1
return
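# External-file bookkeeping sketch; the file name and unit number are
# hypothetical:
#
#     m.add_external("recharge.ref", 201, binflag=False)
#     m.remove_external(unit=201)  # or remove_external(fname="recharge.ref")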
def add_existing_package(
self, filename, ptype=None, copy_to_model_ws=True
):
"""
Add an existing package to a model instance.
Parameters
----------
filename : str
the name of the file to add as a package
ptype : optional
the model package type (e.g. "lpf", "wel", etc). If None,
then the file extension of the filename arg is used
copy_to_model_ws : bool
flag to copy the package file into the model_ws directory.
Returns
-------
None
"""
if ptype is None:
ptype = filename.split(".")[-1]
ptype = str(ptype).upper()
# for pak in self.packagelist:
# if ptype in pak.name:
# print("BaseModel.add_existing_package() warning: " +\
# "replacing existing package {0}".format(ptype))
class Obj:
pass
fake_package = Obj()
fake_package.write_file = lambda: None
fake_package.name = [ptype]
fake_package.extension = [filename.split(".")[-1]]
fake_package.unit_number = [self.next_ext_unit()]
if copy_to_model_ws:
base_filename = os.path.split(filename)[-1]
fake_package.file_name = [base_filename]
shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
else:
fake_package.file_name = [filename]
fake_package.allowDuplicates = True
self.add_package(fake_package)
def get_name_file_entries(self):
"""
Get a string representation of the name file.
Parameters
----------
"""
lines = []
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s = f"{p.name[i]:14s} {p.unit_number[i]:5d} {p.file_name[i]}"
lines.append(s)
return "\n".join(lines) + "\n"
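# get_name_file_entries() only formats the entries; each model class is
# responsible for actually writing its name file. Inspection sketch
# (unit numbers and file names depend on the model):
#
#     print(m.get_name_file_entries())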
def has_package(self, name):
"""
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
True if the package name exists, otherwise False.
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for p in self.packagelist:
for pn in p.name:
if pn.upper() == name:
return True
return False
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for pp in self.packagelist:
if pp.name[0].upper() == name:
return pp
return None
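# has_package()/get_package() sketch; lookups are case-insensitive:
#
#     if m.has_package("WEL"):
#         wel = m.get_package("wel")  # same Package object either way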
def set_version(self, version):
self.version = version.lower()
# check that this is a valid model version
if self.version not in list(self.version_types.keys()):
err = (
f"Error: Unsupported model version ({self.version}). "
"Valid model versions are:"
)
for v in list(self.version_types.keys()):
err += f" {v}"
raise Exception(err)
# set namefile heading
self.heading = (
f"# Name file for {self.version_types[self.version]}, "
f"generated by Flopy version {__version__}."
)
# set heading for each package
for p in self.get_package_list():
pak = self.get_package(p)
if hasattr(pak, "heading"):
pak._generate_heading()
return None
def change_model_ws(self, new_pth=None, reset_external=False):
"""
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
the present working directory).
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
"""
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
print(f"\ncreating model workspace...\n {new_pth}")
os.makedirs(new_pth)
except:
raise OSError(f"{new_pth} not valid, workspace-folder could not be created")
# line = '\n{} not valid, workspace-folder '.format(new_pth) + \
# 'was changed to {}\n'.format(os.getcwd())
# print(line)
# new_pth = os.getcwd()
# --reset the model workspace
old_pth = self._model_ws
self._model_ws = new_pth
if self.verbose:
print(f"\nchanging model workspace...\n {new_pth}")
# reset the paths for each package
for pp in self.packagelist:
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
# create the external path (if needed)
if (
hasattr(self, "external_path")
and self.external_path is not None
and not os.path.exists(
os.path.join(self._model_ws, self.external_path)
)
):
pth = os.path.join(self._model_ws, self.external_path)
os.makedirs(pth)
if reset_external:
self._reset_external(pth, old_pth)
elif reset_external:
self._reset_external(self._model_ws, old_pth)
return None
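# change_model_ws() relocates every package file path; a hedged sketch
# with a hypothetical directory:
#
#     m.change_model_ws("./run_01", reset_external=True)
#     m.write_input()  # input files are now written under ./run_01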
def _reset_external(self, pth, old_pth):
new_ext_fnames = []
for ext_file, output in zip(
self.external_fnames, self.external_output
):
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
# this is a wicked mess
if output:
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
new_ext_file = ext_file
else:
# fpth = os.path.abspath(os.path.join(old_pth, ext_file))
# new_ext_file = os.path.relpath(fpth, os.path.abspath(pth))
fdir = os.path.dirname(ext_file)
if fdir == "":
fpth = os.path.abspath(os.path.join(old_pth, ext_file))
else:
fpth = ext_file
ao = os.path.abspath(os.path.dirname(fpth))
ep = os.path.abspath(pth)
relp = os.path.relpath(ao, ep)
new_ext_file = os.path.join(relp, os.path.basename(ext_file))
new_ext_fnames.append(new_ext_file)
self.external_fnames = new_ext_fnames
@property
def model_ws(self):
return copy.deepcopy(self._model_ws)
def _set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = str(value)
self.namefile = self.__name + "." + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + "." + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
def __setattr__(self, key, value):
if key == "free_format_input":
# if self.bas6 is not None:
# self.bas6.ifrefm = value
super().__setattr__(key, value)
elif key == "name":
self._set_name(value)
elif key == "model_ws":
self.change_model_ws(value)
elif key == "sr" and value.__class__.__name__ == "SpatialReference":
warnings.warn(
"SpatialReference has been deprecated.",
category=DeprecationWarning,
)
if self.dis is not None:
self.dis.sr = value
else:
raise Exception(
"cannot set SpatialReference - ModflowDis not found"
)
elif key == "tr":
assert isinstance(
value, discretization.reference.TemporalReference
)
if self.dis is not None:
self.dis.tr = value
else:
raise Exception(
"cannot set TemporalReference - ModflowDis not found"
)
elif key == "start_datetime":
if self.dis is not None:
self.dis.start_datetime = value
self.tr.start_datetime = value
else:
raise Exception(
"cannot set start_datetime - ModflowDis not found"
)
else:
super().__setattr__(key, value)
def run_model(
self,
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
):
"""
This method will run the model using subprocess.Popen.
Parameters
----------
silent : boolean
Suppress printing of run information to the screen (default is False).
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
by the method. (default is False).
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
return run_model(
self.exe_name,
self.namefile,
model_ws=self.model_ws,
silent=silent,
pause=pause,
report=report,
normal_msg=normal_msg,
)
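# Typical run sketch; assumes the MODFLOW executable is on the PATH:
#
#     success, buff = m.run_model(silent=True, report=True)
#     if not success:
#         print("\n".join(buff[-20:]))  # tail of captured stdout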
def load_results(self):
print("load_results not implemented")
return None
def write_input(self, SelPackList=False, check=False):
"""
Write the input.
Parameters
----------
SelPackList : False or list of packages
"""
if check:
# run check prior to writing input
self.check(f=f"{self.name}.chk", verbose=self.verbose, level=1)
# reset the model to free_format if parameter substitution was
# performed on a model load
if self.parameter_load and not self.free_format_input:
if self.verbose:
print(
"\nResetting free_format_input to True to "
"preserve the precision of the parameter data."
)
self.free_format_input = True
if self.verbose:
print("\nWriting packages:")
if SelPackList == False:
for p in self.packagelist:
if self.verbose:
print(" Package: ", p.name[0])
# prevent individual package checks from running after
# model-level package check above
# otherwise checks are run twice
# or the model level check procedure would have to be split up
# or each package would need a check argument,
# or default for package level check would have to be False
try:
p.write_file(check=False)
except TypeError:
p.write_file()
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(" Package: ", p.name[0])
try:
p.write_file(check=False)
except TypeError:
p.write_file()
break
if self.verbose:
print(" ")
# write name file
self.write_name_file()
# os.chdir(org_dir)
return
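# write_input() sketch; SelPackList restricts output to named packages:
#
#     m.write_input()                             # all packages + name file
#     m.write_input(SelPackList=["DIS", "BAS6"])  # only these packages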
def write_name_file(self):
"""
Every model needs its own write_name_file method
"""
raise Exception(
"IMPLEMENTATION ERROR: writenamefile must be overloaded"
)
def set_model_units(self):
"""
Every model needs its own set_model_units method
"""
raise Exception(
"IMPLEMENTATION ERROR: set_model_units must be overloaded"
)
@property
def name(self):
"""
Get model name
Returns
-------
name : str
name of model
"""
return copy.deepcopy(self.__name)
def add_pop_key_list(self, key):
"""
Add an external file unit number to a list that will be used to remove
model output (typically binary) files from ext_unit_dict.
Parameters
----------
key : int
file unit number
Returns
-------
Examples
--------
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
chk = utils.check(self, f=f, verbose=verbose, level=level)
# check for unit number conflicts
package_units = {}
duplicate_units = {}
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] != 0:
if p.unit_number[i] in package_units.values():
duplicate_units[p.name[i]] = p.unit_number[i]
otherpackage = [
k
for k, v in package_units.items()
if v == p.unit_number[i]
][0]
duplicate_units[otherpackage] = p.unit_number[i]
if len(duplicate_units) > 0:
for k, v in duplicate_units.items():
chk._add_to_summary(
"Error", package=k, value=v, desc="unit number conflict"
)
else:
chk.passed.append("Unit number conflicts")
return self._check(chk, level)
def plot(self, SelPackList=None, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
model input data
Parameters
----------
SelPackList : None or list
List of packages to plot. If SelPackList=None all packages
are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
MODFLOW zero-based layer number to return. If None, then all
all layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return.
(default is zero)
key : str
MfList dictionary key. (default is None)
Returns
----------
axes : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis are returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_model_helper(
self, SelPackList=SelPackList, **kwargs
)
return axes
def to_shapefile(self, filename, package_names=None, **kwargs):
"""
Wrapper function for writing a shapefile for the model grid. If
package_names is not None, then search through the requested packages
looking for arrays that can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> m.to_shapefile('model.shp', SelPackList)
"""
warnings.warn("to_shapefile() is deprecated. use .export()")
self.export(filename, package_names=package_names)
return
def run_model(
exe_name,
namefile,
model_ws="./",
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
use_async=False,
cargs=None,
):
"""
This function will run the model using subprocess.Popen. It
communicates with the model's stdout asynchronously and reports
progress to the screen with timestamps
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
Namefile of model to run. The namefile must be the
filename of the namefile without the path. Namefile can be None
for programs that do not require a control file (name file) to be
passed as a command line argument.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
silent : boolean
Suppress printing of run information to the screen (default is False).
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
by the method. (default is False).
normal_msg : str or list
Normal termination message used to determine if the
run terminated normally. More than one message can be provided using
a list. (Default is 'normal termination')
use_async : boolean
Asynchronously read model stdout and report with timestamps. Good
for models that take a long time to run; not recommended for models
that run very quickly.
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
success = False
buff = []
# convert normal_msg to a list of lower case str for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg]
for idx, s in enumerate(normal_msg):
normal_msg[idx] = s.lower()
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
import platform
if platform.system() == "Windows":
if not exe_name.lower().endswith(".exe"):
exe = which(exe_name + ".exe")
else:
exe = which(exe_name[:-4])
if exe is None:
raise Exception(
f"The program {exe_name} does not exist or is not executable."
)
else:
if not silent:
print(
f"FloPy is using the following executable to run the model: {exe}"
)
if namefile is not None:
if not os.path.isfile(os.path.join(model_ws, namefile)):
raise Exception(
f"The namefile for this model does not exists: {namefile}"
)
# simple little function for the thread to target
def q_output(output, q):
for line in iter(output.readline, b""):
q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe_name]
if namefile is not None:
argv.append(namefile)
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
if not use_async:
while True:
line = proc.stdout.readline().decode("utf-8")
if line == "" and proc.poll() is not None:
break
if line:
for msg in normal_msg:
if msg in line.lower():
success = True
break
line = line.rstrip("\r\n")
if not silent:
print(line)
if report:
buff.append(line)
else:
break
return success, buff
# some tricks for the async stdout reading
q = Queue.Queue()
thread = threading.Thread(target=q_output, args=(proc.stdout, q))
thread.daemon = True
thread.start()
failed_words = ["fail", "error"]
last = datetime.now()
lastsec = 0.0
while True:
try:
line = q.get_nowait()
except Queue.Empty:
pass
else:
if line == "":
break
line = line.decode().lower().strip()
if line != "":
now = datetime.now()
dt = now - last
tsecs = dt.total_seconds() - lastsec
line = f"(elapsed:{tsecs})-->{line}"
lastsec = tsecs + lastsec
buff.append(line)
if not silent:
print(line)
for fword in failed_words:
if fword in line:
success = False
break
if proc.poll() is not None:
break
proc.wait()
thread.join(timeout=1)
buff.extend(proc.stdout.readlines())
proc.stdout.close()
for line in buff:
for msg in normal_msg:
if msg in line.lower():
print("success")
success = True
break
if pause:
input("Press Enter to continue...")
return success, buff
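# The module-level run_model() can be called without a model object;
# the executable name, namefile, and workspace here are assumptions:
#
#     from flopy.mbase import run_model
#     success, buff = run_model(
#         "mf2005", "demo.nam", model_ws="./run_01", report=True
#     )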
|
change_model_ws
|
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
the present working directory).
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
|
"""
mbase module
This module contains the base model class from which
all of the other models inherit.
"""
import abc
import os
import shutil
import threading
import warnings
import queue as Queue
from datetime import datetime
from shutil import which
from subprocess import Popen, PIPE, STDOUT
import copy
import numpy as np
from flopy import utils, discretization
from .version import __version__
from .discretization.grid import Grid
## Global variables
# Multiplier for individual array elements in integer and real arrays read by
# MODFLOW's U2DREL, U1DREL and U2DINT.
iconst = 1
# Printout flag. If >= 0 then array values read are printed in listing file.
iprn = -1
# external exceptions for users
class PackageLoadException(Exception):
"""
FloPy package load exception.
"""
def __init__(self, error, location=""):
"""Initialize exception."""
self.message = error
super().__init__(f"{error} ({location})")
class FileDataEntry:
def __init__(self, fname, unit, binflag=False, output=False, package=None):
self.fname = fname
self.unit = unit
self.binflag = binflag
self.output = output
self.package = package
class FileData:
def __init__(self):
self.file_data = []
return
def add_file(self, fname, unit, binflag=False, output=False, package=None):
ipop = []
for idx, file_data in enumerate(self.file_data):
if file_data.fname == fname or file_data.unit == unit:
ipop.append(idx)
# drop existing entries that clash on file name or unit number
for i in reversed(ipop):
self.file_data.pop(i)
self.file_data.append(
FileDataEntry(
fname, unit, binflag=binflag, output=output, package=package
)
)
return
class ModelInterface:
def __init__(self):
self._mg_resync = True
self._modelgrid = None
def update_modelgrid(self):
if self._modelgrid is not None:
self._modelgrid = Grid(
proj4=self._modelgrid.proj4,
xoff=self._modelgrid.xoffset,
yoff=self._modelgrid.yoffset,
angrot=self._modelgrid.angrot,
)
self._mg_resync = True
@property
@abc.abstractmethod
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
@abc.abstractmethod
def packagelist(self):
raise NotImplementedError(
"must define packagelist in child class to use this base class"
)
@property
@abc.abstractmethod
def namefile(self):
raise NotImplementedError(
"must define namefile in child class to use this base class"
)
@property
@abc.abstractmethod
def model_ws(self):
raise NotImplementedError(
"must define model_ws in child class to use this base class"
)
@property
@abc.abstractmethod
def exename(self):
raise NotImplementedError(
"must define exename in child class to use this base class"
)
@property
@abc.abstractmethod
def version(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@property
@abc.abstractmethod
def solver_tols(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@abc.abstractmethod
def export(self, f, **kwargs):
raise NotImplementedError(
"must define export in child class to use this base class"
)
@property
@abc.abstractmethod
def laytyp(self):
raise NotImplementedError(
"must define laytyp in child class to use this base class"
)
@property
@abc.abstractmethod
def hdry(self):
raise NotImplementedError(
"must define hdry in child class to use this base class"
)
@property
@abc.abstractmethod
def hnoflo(self):
raise NotImplementedError(
"must define hnoflo in child class to use this base class"
)
@property
@abc.abstractmethod
def laycbd(self):
raise NotImplementedError(
"must define laycbd in child class to use this base class"
)
@property
@abc.abstractmethod
def verbose(self):
raise NotImplementedError(
"must define verbose in child class to use this base class"
)
@abc.abstractmethod
def check(self, f=None, verbose=True, level=1):
raise NotImplementedError(
"must define check in child class to use this base class"
)
def get_package_list(self, ftype=None):
"""
Get a list of all the package names.
Parameters
----------
ftype : str
Type of package, 'RIV', 'LPF', etc.
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
"""
val = []
for pp in self.packagelist:
if ftype is None:
val.append(pp.name[0].upper())
elif pp.package_type.lower() == ftype:
val.append(pp.name[0].upper())
return val
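# get_package_list() sketch; ftype filters on Package.package_type:
#
#     m.get_package_list()             # e.g. ['DIS', 'BAS6', 'LPF', 'PCG']
#     m.get_package_list(ftype="lpf")  # ['LPF'] when present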
def _check(self, chk, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
summarize : bool
Boolean flag used to determine if summary of results is written
to the screen
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
results = {}
for p in self.packagelist:
if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
results[p.name[0]] = p.check(
f=None,
verbose=False,
level=level - 1,
checktype=chk.__class__,
)
# model level checks
# solver check
if self.version in chk.solver_packages.keys():
solvers = set(chk.solver_packages[self.version]).intersection(
set(self.get_package_list())
)
if not solvers:
chk._add_to_summary(
"Error", desc="\r No solver package", package="model"
)
elif len(list(solvers)) > 1:
for s in solvers:
chk._add_to_summary(
"Error",
desc="\r Multiple solver packages",
package=s,
)
else:
chk.passed.append("Compatible solver package")
# add package check results to model level check summary
for r in results.values():
if (
r is not None and r.summary_array is not None
): # currently SFR doesn't have one
chk.summary_array = np.append(
chk.summary_array, r.summary_array
).view(np.recarray)
chk.passed += [
f"{r.package.name[0]} package: {psd}" for psd in r.passed
]
chk.summarize()
return chk
class BaseModel(ModelInterface):
"""
MODFLOW-based models base class.
Parameters
----------
modelname : str, default "modflowtest"
Name of the model, which is also used for model file names.
namefile_ext : str, default "nam"
Name file extension, without "."
exe_name : str, default "mf2k.exe"
Name of the modflow executable.
model_ws : str, optional
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
structured : bool, default True
Specify if model grid is structured (default) or unstructured.
verbose : bool, default False
Print additional information to the screen.
**kwargs : dict, optional
Used to define: ``xll``/``yll`` for the x- and y-coordinates of
the lower-left corner of the grid, ``xul``/``yul`` for the
x- and y-coordinates of the upper-left corner of the grid
(deprecated), ``rotation`` for the grid rotation (default 0.0),
``proj4_str`` for a PROJ string, and ``start_datetime`` for
model start date (default "1-1-1970").
"""
def __init__(
self,
modelname="modflowtest",
namefile_ext="nam",
exe_name="mf2k.exe",
model_ws=None,
structured=True,
verbose=False,
**kwargs,
):
"""Initialize BaseModel."""
super().__init__()
self.__name = modelname
self.namefile_ext = namefile_ext or ""
self._namefile = self.__name + "." + self.namefile_ext
self._packagelist = []
self.heading = ""
self.exe_name = exe_name
self._verbose = verbose
self.external_path = None
self.external_extension = "ref"
if model_ws is None:
model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
except:
print(
f"\n{model_ws} not valid, "
f"workspace-folder was changed to {os.getcwd()}\n"
)
model_ws = os.getcwd()
self._model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ""
# check for reference info in kwargs
# we are just carrying these until a dis package is added
xll = kwargs.pop("xll", None)
yll = kwargs.pop("yll", None)
self._xul = kwargs.pop("xul", None)
self._yul = kwargs.pop("yul", None)
self._rotation = kwargs.pop("rotation", 0.0)
self._proj4_str = kwargs.pop("proj4_str", None)
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
# build model discretization objects
self._modelgrid = Grid(
proj4=self._proj4_str,
xoff=xll,
yoff=yll,
angrot=self._rotation,
)
self._modeltime = None
# Model file information
self.__onunit__ = 10
# external option stuff
self.array_free_format = True
self.free_format_input = True
self.parameter_load = False
self.array_format = None
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external_output = []
self.package_units = []
self._next_ext_unit = None
# output files
self.output_fnames = []
self.output_units = []
self.output_binflag = []
self.output_packages = []
return
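# BaseModel is not used directly; concrete models subclass it. A hedged
# construction sketch through the Modflow subclass:
#
#     import flopy
#     m = flopy.modflow.Modflow(
#         modelname="demo", exe_name="mf2005", model_ws="./demo_ws"
#     )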
@property
def modeltime(self):
raise NotImplementedError(
"must define modeltime in child class to use this base class"
)
@property
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
def packagelist(self):
return self._packagelist
@packagelist.setter
def packagelist(self, packagelist):
self._packagelist = packagelist
@property
def namefile(self):
return self._namefile
@namefile.setter
def namefile(self, namefile):
self._namefile = namefile
@property
def model_ws(self):
return self._model_ws
@model_ws.setter
def model_ws(self, model_ws):
self._model_ws = model_ws
@property
def exename(self):
return self._exename
@exename.setter
def exename(self, exename):
self._exename = exename
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, verbose):
self._verbose = verbose
@property
def laytyp(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").laytyp.array
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").laycon.array
if self.get_package("UPW") is not None:
return self.get_package("UPW").laytyp.array
return None
@property
def hdry(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").hdry
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").hdry
if self.get_package("UPW") is not None:
return self.get_package("UPW").hdry
return None
@property
def hnoflo(self):
try:
bas6 = self.get_package("BAS6")
return bas6.hnoflo
except AttributeError:
return None
@property
def laycbd(self):
try:
dis = self.get_package("DIS")
return dis.laycbd.array
except AttributeError:
return None
# we don't need these - no need for controlled access to array_free_format
# def set_free_format(self, value=True):
# """
# Set the free format flag for the model instance
#
# Parameters
# ----------
# value : bool
# Boolean value to set free format flag for model. (default is True)
#
# Returns
# -------
#
# """
# if not isinstance(value, bool):
# print('Error: set_free_format passed value must be a boolean')
# return False
# self.array_free_format = value
#
# def get_free_format(self):
# """
# Return the free format flag for the model
#
# Returns
# -------
# out : bool
# Free format flag for the model
#
# """
# return self.array_free_format
def next_unit(self, i=None):
if i is not None:
self.__onunit__ = i - 1
else:
self.__onunit__ += 1
return self.__onunit__
def next_ext_unit(self):
"""
Function to encapsulate next_ext_unit attribute
"""
next_unit = self._next_ext_unit + 1
self._next_ext_unit += 1
return next_unit
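# next_unit()/next_ext_unit() hand out sequential unit numbers; a small
# sketch (the starting value is set by the concrete model class):
#
#     m._next_ext_unit = 1000
#     m.next_ext_unit()  # -> 1001, and advances the internal counter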
def export(self, f, **kwargs):
"""
Method to export a model to netcdf or shapefile based on the
extension of the file name (.shp for shapefile, .nc for netcdf)
Parameters
----------
f : str
filename
kwargs : keyword arguments
modelgrid : flopy.discretization.Grid instance
user supplied modelgrid which can be used for exporting
in lieu of the modelgrid associated with the model object
Returns
-------
None or Netcdf object
"""
from .export import utils
return utils.model_export(f, self, **kwargs)
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
"""
for idx, u in enumerate(p.unit_number):
if u != 0:
if u in self.package_units or u in self.external_units:
try:
pn = p.name[idx]
except:
pn = p.name
if self.verbose:
print(
f"\nWARNING:\n unit {u} of package {pn} already in use."
)
self.package_units.append(u)
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif isinstance(p, type(pp)):
if self.verbose:
print(
"\nWARNING:\n Two packages of the same type, "
f"Replacing existing '{p.name[0]}' package."
)
self.packagelist[i] = p
return
if self.verbose:
print("adding Package: ", p.name[0])
self.packagelist.append(p)
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
"""
for i, pp in enumerate(self.packagelist):
if pname.upper() in pp.name:
if self.verbose:
print("removing Package: ", pp.name)
# Remove the package object from the model's packagelist
p = self.packagelist.pop(i)
# Remove the package unit number from the list of package
# units stored with the model
for iu in p.unit_number:
if iu in self.package_units:
self.package_units.remove(iu)
return
raise StopIteration(
"Package name " + pname + " not found in Package list"
)
def __getattr__(self, item):
"""
__getattr__ - syntactic sugar
Parameters
----------
item : str
3 character package name (case insensitive), or "tr", "nper", or
"start_datetime" to access attributes of the ModflowDis object
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`, or the
requested ModflowDis attribute
Note
----
if self.dis is None, then "tr" and "start_datetime" return None and
"nper" returns 0
"""
if item == "output_packages" or not hasattr(self, "output_packages"):
raise AttributeError(item)
if item == "tr":
if self.dis is not None:
return self.dis.tr
else:
return None
if item == "nper":
if self.dis is not None:
return self.dis.nper
else:
return 0
if item == "start_datetime":
if self.dis is not None:
return self.dis.start_datetime
else:
return None
# return self.get_package(item)
# to avoid infinite recursion
if item == "_packagelist" or item == "packagelist":
raise AttributeError(item)
pckg = self.get_package(item)
if pckg is not None or item in self.mfnam_packages:
return pckg
if item == "modelgrid":
return
raise AttributeError(item)
def get_ext_dict_attr(
self, ext_unit_dict=None, unit=None, filetype=None, pop_key=True
):
iu = None
fname = None
if ext_unit_dict is not None:
for key, value in ext_unit_dict.items():
if key == unit:
iu = key
fname = os.path.basename(value.filename)
break
elif value.filetype == filetype:
iu = key
fname = os.path.basename(value.filename)
if pop_key:
self.add_pop_key_list(iu)
break
return iu, fname
def _output_msg(self, i, add=True):
if add:
txt1 = "Adding"
txt2 = "to"
else:
txt1 = "Removing"
txt2 = "from"
print(
f"{txt1} {self.output_fnames[i]} (unit={self.output_units[i]}) "
f"{txt2} the output list."
)
def add_output_file(
self, unit, fname=None, extension="cbc", binflag=True, package=None
):
"""
Add an ASCII or binary output file for a package
Parameters
----------
unit : int
unit number of external array
fname : str
filename of external array. (default is None)
extension : str
extension to use for the cell-by-cell file. Only used if fname
is None. (default is cbc)
binflag : bool
boolean flag indicating if the output file is a binary file.
Default is True
package : str
string that defines the package the output file is attached to.
Default is None
"""
add_cbc = False
if unit > 0:
add_cbc = True
# determine if the file is in external_units
if abs(unit) in self.external_units:
idx = self.external_units.index(abs(unit))
if fname is None:
fname = os.path.basename(self.external_fnames[idx])
binflag = self.external_binflag[idx]
self.remove_external(unit=abs(unit))
# determine if the unit exists in the output data
if abs(unit) in self.output_units:
add_cbc = False
idx = self.output_units.index(abs(unit))
# determine if binflag has changed
if binflag is not self.output_binflag[idx]:
add_cbc = True
if add_cbc:
self.remove_output(unit=abs(unit))
else:
if package is not None:
self.output_packages[idx].append(package)
if add_cbc:
if fname is None:
fname = f"{self.name}.{extension}"
# check if this file name exists for a different unit number
if fname in self.output_fnames:
idx = self.output_fnames.index(fname)
iut = self.output_units[idx]
if iut != unit:
# include unit number in fname if package has
# not been passed
if package is None:
fname = f"{self.name}.{unit}.{extension}"
# include package name in fname
else:
fname = f"{self.name}.{package}.{extension}"
else:
fname = os.path.basename(fname)
self.add_output(fname, unit, binflag=binflag, package=package)
return
def add_output(self, fname, unit, binflag=False, package=None):
"""
Assign an output file so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.output_fnames:
if self.verbose:
print(
"BaseModel.add_output() warning: "
f"replacing existing filename {fname}"
)
idx = self.output_fnames.index(fname)
if self.verbose:
self._output_msg(idx, add=False)
self.output_fnames.pop(idx)
self.output_units.pop(idx)
self.output_binflag.pop(idx)
self.output_packages.pop(idx)
self.output_fnames.append(fname)
self.output_units.append(unit)
self.output_binflag.append(binflag)
if package is not None:
self.output_packages.append([package])
else:
self.output_packages.append([])
if self.verbose:
self._output_msg(-1, add=True)
return
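# add_output() registers the DATA/DATA(BINARY) name-file entry directly;
# the file name, unit, and package below are made up:
#
#     m.add_output("demo.hds", 51, binflag=True, package="OC")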
def remove_output(self, fname=None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
else:
msg = " either fname or unit must be passed to remove_output()"
raise Exception(msg)
return
def get_output(self, fname=None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
return self.output_units[i]
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
return self.output_fnames[i]
return None
else:
msg = " either fname or unit must be passed to get_output()"
raise Exception(msg)
return
def set_output_attribute(self, fname=None, unit=None, attr=None):
"""
Set a variable in an output file from the model by specifying either
the file name or the unit number and a dictionary with attributes
to change.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
attr : dict
dictionary of attributes to change; supported keys are
"binflag", "fname", and "unit"
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
msg = (
" either fname or unit must be passed "
"to set_output_attribute()"
)
raise Exception(msg)
if attr is not None:
if idx is not None:
for key, value in attr.items():
if key == "binflag":
self.output_binflag[idx] = value
elif key == "fname":
self.output_fnames[idx] = value
elif key == "unit":
self.output_units[idx] = value
return
def get_output_attribute(self, fname=None, unit=None, attr=None):
"""
Get an attribute for an output file from the model by specifying either
the file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
attr : str
name of the attribute to return; one of "binflag", "fname",
or "unit"
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
" either fname or unit must be passed "
"to set_output_attribute()"
)
v = None
if attr is not None:
if idx is not None:
if attr == "binflag":
v = self.output_binflag[idx]
elif attr == "fname":
v = self.output_fnames[idx]
elif attr == "unit":
v = self.output_units[idx]
return v
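# Output-attribute accessor sketch; the attr keys mirror the internal
# lists (binflag, fname, unit), and the unit number is hypothetical:
#
#     m.set_output_attribute(unit=51, attr={"binflag": False})
#     m.get_output_attribute(unit=51, attr="fname")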
def add_external(self, fname, unit, binflag=False, output=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.external_fnames:
if self.verbose:
print(
"BaseModel.add_external() warning: "
f"replacing existing filename {fname}"
)
idx = self.external_fnames.index(fname)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
if unit in self.external_units:
if self.verbose:
msg = f"BaseModel.add_external() warning: replacing existing unit {unit}"
print(msg)
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
self.external_output.append(output)
return
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
"""
plist = []
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
plist.append(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
plist.append(i)
else:
msg = " either fname or unit must be passed to remove_external()"
raise Exception(msg)
# remove external file
j = 0
for i in plist:
ipos = i - j
self.external_fnames.pop(ipos)
self.external_units.pop(ipos)
self.external_binflag.pop(ipos)
self.external_output.pop(ipos)
j += 1
return
def add_existing_package(
self, filename, ptype=None, copy_to_model_ws=True
):
"""
Add an existing package to a model instance.
Parameters
----------
filename : str
the name of the file to add as a package
ptype : optional
the model package type (e.g. "lpf", "wel", etc). If None,
then the file extension of the filename arg is used
copy_to_model_ws : bool
flag to copy the package file into the model_ws directory.
Returns
-------
None
"""
if ptype is None:
ptype = filename.split(".")[-1]
ptype = str(ptype).upper()
# for pak in self.packagelist:
# if ptype in pak.name:
# print("BaseModel.add_existing_package() warning: " +\
# "replacing existing package {0}".format(ptype))
class Obj:
pass
fake_package = Obj()
fake_package.write_file = lambda: None
fake_package.name = [ptype]
fake_package.extension = [filename.split(".")[-1]]
fake_package.unit_number = [self.next_ext_unit()]
if copy_to_model_ws:
base_filename = os.path.split(filename)[-1]
fake_package.file_name = [base_filename]
shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
else:
fake_package.file_name = [filename]
fake_package.allowDuplicates = True
self.add_package(fake_package)
def get_name_file_entries(self):
"""
Get a string representation of the name file.
Parameters
----------
"""
lines = []
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s = f"{p.name[i]:14s} {p.unit_number[i]:5d} {p.file_name[i]}"
lines.append(s)
return "\n".join(lines) + "\n"
def has_package(self, name):
"""
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
True if the package name exists, otherwise False.
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for p in self.packagelist:
for pn in p.name:
if pn.upper() == name:
return True
return False
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for pp in self.packagelist:
if pp.name[0].upper() == name:
return pp
return None
def set_version(self, version):
self.version = version.lower()
# check that this is a valid model version
if self.version not in list(self.version_types.keys()):
err = (
f"Error: Unsupported model version ({self.version}). "
"Valid model versions are:"
)
for v in list(self.version_types.keys()):
err += f" {v}"
raise Exception(err)
# set namefile heading
self.heading = (
f"# Name file for {self.version_types[self.version]}, "
f"generated by Flopy version {__version__}."
)
# set heading for each package
for p in self.get_package_list():
pak = self.get_package(p)
if hasattr(pak, "heading"):
pak._generate_heading()
return None
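# set_version() sketch; valid versions come from the subclass's
# version_types mapping (e.g. "mf2005" or "mfnwt" for Modflow):
#
#     m.set_version("mfnwt")  # raises Exception for unsupported versions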
# MASKED: change_model_ws function (lines 1186-1240)
def _reset_external(self, pth, old_pth):
new_ext_fnames = []
for ext_file, output in zip(
self.external_fnames, self.external_output
):
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
# this is a wicked mess
if output:
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
new_ext_file = ext_file
else:
# fpth = os.path.abspath(os.path.join(old_pth, ext_file))
# new_ext_file = os.path.relpath(fpth, os.path.abspath(pth))
fdir = os.path.dirname(ext_file)
if fdir == "":
fpth = os.path.abspath(os.path.join(old_pth, ext_file))
else:
fpth = ext_file
ao = os.path.abspath(os.path.dirname(fpth))
ep = os.path.abspath(pth)
relp = os.path.relpath(ao, ep)
new_ext_file = os.path.join(relp, os.path.basename(ext_file))
new_ext_fnames.append(new_ext_file)
self.external_fnames = new_ext_fnames
@property
def model_ws(self):
return copy.deepcopy(self._model_ws)
def _set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = str(value)
self.namefile = self.__name + "." + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + "." + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
def __setattr__(self, key, value):
if key == "free_format_input":
# if self.bas6 is not None:
# self.bas6.ifrefm = value
super().__setattr__(key, value)
elif key == "name":
self._set_name(value)
elif key == "model_ws":
self.change_model_ws(value)
elif key == "sr" and value.__class__.__name__ == "SpatialReference":
warnings.warn(
"SpatialReference has been deprecated.",
category=DeprecationWarning,
)
if self.dis is not None:
self.dis.sr = value
else:
raise Exception(
"cannot set SpatialReference - ModflowDis not found"
)
elif key == "tr":
assert isinstance(
value, discretization.reference.TemporalReference
)
if self.dis is not None:
self.dis.tr = value
else:
raise Exception(
"cannot set TemporalReference - ModflowDis not found"
)
elif key == "start_datetime":
if self.dis is not None:
self.dis.start_datetime = value
self.tr.start_datetime = value
else:
raise Exception(
"cannot set start_datetime - ModflowDis not found"
)
else:
super().__setattr__(key, value)
def run_model(
self,
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
):
"""
This method will run the model using subprocess.Popen.
Parameters
----------
silent : boolean
Suppress printing of run information to the screen (default is False).
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
by the method. (default is False).
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
return run_model(
self.exe_name,
self.namefile,
model_ws=self.model_ws,
silent=silent,
pause=pause,
report=report,
normal_msg=normal_msg,
)
def load_results(self):
print("load_results not implemented")
return None
def write_input(self, SelPackList=False, check=False):
"""
Write the input.
Parameters
----------
SelPackList : False or list of packages
"""
if check:
# run check prior to writing input
self.check(f=f"{self.name}.chk", verbose=self.verbose, level=1)
# reset the model to free_format if parameter substitution was
# performed on a model load
if self.parameter_load and not self.free_format_input:
if self.verbose:
print(
"\nResetting free_format_input to True to "
"preserve the precision of the parameter data."
)
self.free_format_input = True
if self.verbose:
print("\nWriting packages:")
        if SelPackList is False:
for p in self.packagelist:
if self.verbose:
print(" Package: ", p.name[0])
# prevent individual package checks from running after
# model-level package check above
# otherwise checks are run twice
# or the model level check procedure would have to be split up
# or each package would need a check argument,
# or default for package level check would have to be False
try:
p.write_file(check=False)
except TypeError:
p.write_file()
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(" Package: ", p.name[0])
try:
p.write_file(check=False)
except TypeError:
p.write_file()
break
if self.verbose:
print(" ")
# write name file
self.write_name_file()
# os.chdir(org_dir)
return
def write_name_file(self):
"""
        Every model needs its own write_name_file method.
        """
        raise Exception(
            "IMPLEMENTATION ERROR: write_name_file must be overloaded"
)
def set_model_units(self):
"""
Every model needs its own set_model_units method
"""
raise Exception(
"IMPLEMENTATION ERROR: set_model_units must be overloaded"
)
@property
def name(self):
"""
Get model name
Returns
-------
name : str
name of model
"""
return copy.deepcopy(self.__name)
def add_pop_key_list(self, key):
"""
        Add an external file unit number to a list that will be used to remove
model output (typically binary) files from ext_unit_dict.
Parameters
----------
key : int
file unit number
Returns
-------
Examples
--------
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
chk = utils.check(self, f=f, verbose=verbose, level=level)
# check for unit number conflicts
package_units = {}
duplicate_units = {}
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] != 0:
                    if p.unit_number[i] in package_units.values():
                        duplicate_units[p.name[i]] = p.unit_number[i]
                        otherpackage = [
                            k
                            for k, v in package_units.items()
                            if v == p.unit_number[i]
                        ][0]
                        duplicate_units[otherpackage] = p.unit_number[i]
                    # record this unit so later packages are checked against it
                    package_units[p.name[i]] = p.unit_number[i]
if len(duplicate_units) > 0:
for k, v in duplicate_units.items():
chk._add_to_summary(
"Error", package=k, value=v, desc="unit number conflict"
)
else:
chk.passed.append("Unit number conflicts")
return self._check(chk, level)
def plot(self, SelPackList=None, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
model input data
Parameters
----------
SelPackList : bool or list
            List of packages to plot. If SelPackList=None all packages
are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
                MODFLOW zero-based layer number to return. If None, then
                all layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return.
(default is zero)
key : str
MfList dictionary key. (default is None)
Returns
----------
axes : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis are returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_model_helper(
self, SelPackList=SelPackList, **kwargs
)
return axes
def to_shapefile(self, filename, package_names=None, **kwargs):
"""
Wrapper function for writing a shapefile for the model grid. If
package_names is not None, then search through the requested packages
looking for arrays that can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
        >>> m.to_shapefile('model.shp', package_names=['dis', 'lpf'])
"""
warnings.warn("to_shapefile() is deprecated. use .export()")
self.export(filename, package_names=package_names)
return
def run_model(
exe_name,
namefile,
model_ws="./",
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
use_async=False,
cargs=None,
):
"""
    This function will run the model using subprocess.Popen. If use_async
    is True, it communicates with the model's stdout asynchronously and
    reports progress to the screen with timestamps.
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
Namefile of model to run. The namefile must be the
filename of the namefile without the path. Namefile can be None
to allow programs that do not require a control file (name file)
to be passed as a command line argument.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
silent : boolean
        Suppress echo of run information to the screen (default is False).
    pause : boolean, optional
        Pause upon completion (default is False).
    report : boolean, optional
        Save stdout lines to a list (buff) which is returned
        by the method. (default is False).
normal_msg : str or list
Normal termination message used to determine if the
run terminated normally. More than one message can be provided using
a list. (Default is 'normal termination')
    use_async : boolean
        If True, read model stdout asynchronously and report progress with
        timestamps. Useful for models that take a long time to run; not
        useful for models that finish quickly. (default is False)
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
success = False
buff = []
# convert normal_msg to a list of lower case str for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg]
for idx, s in enumerate(normal_msg):
normal_msg[idx] = s.lower()
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
import platform
        if platform.system() == "Windows":
if not exe_name.lower().endswith(".exe"):
exe = which(exe_name + ".exe")
elif exe_name.lower().endswith(".exe"):
exe = which(exe_name[:-4])
if exe is None:
raise Exception(
f"The program {exe_name} does not exist or is not executable."
)
else:
if not silent:
print(
f"FloPy is using the following executable to run the model: {exe}"
)
if namefile is not None:
if not os.path.isfile(os.path.join(model_ws, namefile)):
raise Exception(
f"The namefile for this model does not exists: {namefile}"
)
# simple little function for the thread to target
def q_output(output, q):
for line in iter(output.readline, b""):
q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe_name]
if namefile is not None:
argv.append(namefile)
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
if not use_async:
while True:
line = proc.stdout.readline().decode("utf-8")
if line == "" and proc.poll() is not None:
break
if line:
for msg in normal_msg:
if msg in line.lower():
success = True
break
line = line.rstrip("\r\n")
if not silent:
print(line)
if report:
buff.append(line)
else:
break
return success, buff
# some tricks for the async stdout reading
q = Queue.Queue()
thread = threading.Thread(target=q_output, args=(proc.stdout, q))
thread.daemon = True
thread.start()
failed_words = ["fail", "error"]
last = datetime.now()
lastsec = 0.0
while True:
try:
line = q.get_nowait()
except Queue.Empty:
pass
else:
if line == "":
break
line = line.decode().lower().strip()
if line != "":
now = datetime.now()
dt = now - last
tsecs = dt.total_seconds() - lastsec
line = f"(elapsed:{tsecs})-->{line}"
lastsec = tsecs + lastsec
buff.append(line)
if not silent:
print(line)
for fword in failed_words:
if fword in line:
success = False
break
if proc.poll() is not None:
break
proc.wait()
thread.join(timeout=1)
buff.extend(proc.stdout.readlines())
proc.stdout.close()
for line in buff:
for msg in normal_msg:
if msg in line.lower():
print("success")
success = True
break
if pause:
input("Press Enter to continue...")
return success, buff
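# Usage sketch (added for illustration, not part of the original source; the
# executable name "mf2005" and the workspace path below are assumptions):
#
#   from flopy.mbase import run_model
#
#   success, buff = run_model(
#       "mf2005", "model.nam", model_ws="./model_ws", report=True
#   )
#   if not success:
#       print("\n".join(buff[-10:]))  # show the tail of the captured run log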
|
def change_model_ws(self, new_pth=None, reset_external=False):
"""
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
            the present working directory).
        reset_external : bool
            If True, external file paths are rewritten relative to the
            new workspace. (default is False)
        Returns
        -------
        None
        """
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
print(f"\ncreating model workspace...\n {new_pth}")
os.makedirs(new_pth)
except:
raise OSError(f"{new_pth} not valid, workspace-folder")
# line = '\n{} not valid, workspace-folder '.format(new_pth) + \
# 'was changed to {}\n'.format(os.getcwd())
# print(line)
# new_pth = os.getcwd()
# --reset the model workspace
old_pth = self._model_ws
self._model_ws = new_pth
if self.verbose:
print(f"\nchanging model workspace...\n {new_pth}")
# reset the paths for each package
for pp in self.packagelist:
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
# create the external path (if needed)
if (
hasattr(self, "external_path")
and self.external_path is not None
and not os.path.exists(
os.path.join(self._model_ws, self.external_path)
)
):
pth = os.path.join(self._model_ws, self.external_path)
os.makedirs(pth)
if reset_external:
self._reset_external(pth, old_pth)
elif reset_external:
self._reset_external(self._model_ws, old_pth)
return None
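    # Usage sketch (illustrative; the model name and paths are assumptions):
    #
    #   >>> import flopy
    #   >>> m = flopy.modflow.Modflow("demo", model_ws="./old_ws")
    #   >>> m.change_model_ws("./new_ws", reset_external=True)
    #   >>> m.model_ws
    #   './new_ws'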
| 1,186
| 1,240
|
"""
mbase module
This module contains the base model class from which
all of the other models inherit.
"""
import abc
import os
import shutil
import threading
import warnings
import queue as Queue
from datetime import datetime
from shutil import which
from subprocess import Popen, PIPE, STDOUT
import copy
import numpy as np
from flopy import utils, discretization
from .version import __version__
from .discretization.grid import Grid
## Global variables
# Multiplier for individual array elements in integer and real arrays read by
# MODFLOW's U2DREL, U1DREL and U2DINT.
iconst = 1
# Printout flag. If >= 0 then array values read are printed in listing file.
iprn = -1
# external exceptions for users
class PackageLoadException(Exception):
"""
FloPy package load exception.
"""
def __init__(self, error, location=""):
"""Initialize exception."""
self.message = error
super().__init__(f"{error} ({location})")
class FileDataEntry:
def __init__(self, fname, unit, binflag=False, output=False, package=None):
self.fname = fname
self.unit = unit
self.binflag = binflag
self.output = output
self.package = package
class FileData:
def __init__(self):
self.file_data = []
return
def add_file(self, fname, unit, binflag=False, output=False, package=None):
ipop = []
        for idx, file_data in enumerate(self.file_data):
            if file_data.fname == fname or file_data.unit == unit:
                ipop.append(idx)
        # drop clashing entries (same name or unit) before adding the new one
        for idx in sorted(ipop, reverse=True):
            self.file_data.pop(idx)
self.file_data.append(
FileDataEntry(
fname, unit, binflag=binflag, output=output, package=package
)
)
return
class ModelInterface:
def __init__(self):
self._mg_resync = True
self._modelgrid = None
def update_modelgrid(self):
if self._modelgrid is not None:
self._modelgrid = Grid(
proj4=self._modelgrid.proj4,
xoff=self._modelgrid.xoffset,
yoff=self._modelgrid.yoffset,
angrot=self._modelgrid.angrot,
)
self._mg_resync = True
@property
@abc.abstractmethod
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
@abc.abstractmethod
def packagelist(self):
raise NotImplementedError(
"must define packagelist in child class to use this base class"
)
@property
@abc.abstractmethod
def namefile(self):
raise NotImplementedError(
"must define namefile in child class to use this base class"
)
@property
@abc.abstractmethod
def model_ws(self):
raise NotImplementedError(
"must define model_ws in child class to use this base class"
)
@property
@abc.abstractmethod
def exename(self):
raise NotImplementedError(
"must define exename in child class to use this base class"
)
@property
@abc.abstractmethod
def version(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@property
@abc.abstractmethod
def solver_tols(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@abc.abstractmethod
def export(self, f, **kwargs):
raise NotImplementedError(
"must define export in child class to use this base class"
)
@property
@abc.abstractmethod
def laytyp(self):
raise NotImplementedError(
"must define laytyp in child class to use this base class"
)
@property
@abc.abstractmethod
def hdry(self):
raise NotImplementedError(
"must define hdry in child class to use this base class"
)
@property
@abc.abstractmethod
def hnoflo(self):
raise NotImplementedError(
"must define hnoflo in child class to use this base class"
)
@property
@abc.abstractmethod
def laycbd(self):
raise NotImplementedError(
"must define laycbd in child class to use this base class"
)
@property
@abc.abstractmethod
def verbose(self):
raise NotImplementedError(
"must define verbose in child class to use this base class"
)
@abc.abstractmethod
def check(self, f=None, verbose=True, level=1):
raise NotImplementedError(
"must define check in child class to use this base class"
)
def get_package_list(self, ftype=None):
"""
Get a list of all the package names.
Parameters
----------
ftype : str
Type of package, 'RIV', 'LPF', etc.
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
"""
val = []
for pp in self.packagelist:
if ftype is None:
val.append(pp.name[0].upper())
            elif pp.package_type.lower() == ftype.lower():
val.append(pp.name[0].upper())
return val
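    # Illustration (not in the original source; the package set shown is
    # hypothetical):
    #
    #   >>> m.get_package_list()
    #   ['DIS', 'BAS6', 'LPF', 'WEL']
    #   >>> m.get_package_list(ftype="wel")
    #   ['WEL']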
def _check(self, chk, level=1):
"""
Check model data for common errors.
Parameters
----------
        chk : check object
            Model-level check instance to which package-level results and
            model-level summary entries are added.
        level : int
            Check method analysis level. If level=0, summary checks are
            performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
results = {}
for p in self.packagelist:
if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
results[p.name[0]] = p.check(
f=None,
verbose=False,
level=level - 1,
checktype=chk.__class__,
)
# model level checks
# solver check
if self.version in chk.solver_packages.keys():
solvers = set(chk.solver_packages[self.version]).intersection(
set(self.get_package_list())
)
if not solvers:
chk._add_to_summary(
"Error", desc="\r No solver package", package="model"
)
elif len(list(solvers)) > 1:
for s in solvers:
chk._add_to_summary(
"Error",
desc="\r Multiple solver packages",
package=s,
)
else:
chk.passed.append("Compatible solver package")
# add package check results to model level check summary
for r in results.values():
if (
r is not None and r.summary_array is not None
): # currently SFR doesn't have one
chk.summary_array = np.append(
chk.summary_array, r.summary_array
).view(np.recarray)
chk.passed += [
f"{r.package.name[0]} package: {psd}" for psd in r.passed
]
chk.summarize()
return chk
class BaseModel(ModelInterface):
"""
MODFLOW-based models base class.
Parameters
----------
modelname : str, default "modflowtest"
Name of the model, which is also used for model file names.
namefile_ext : str, default "nam"
Name file extension, without "."
exe_name : str, default "mf2k.exe"
Name of the modflow executable.
model_ws : str, optional
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
structured : bool, default True
Specify if model grid is structured (default) or unstructured.
verbose : bool, default False
Print additional information to the screen.
**kwargs : dict, optional
Used to define: ``xll``/``yll`` for the x- and y-coordinates of
the lower-left corner of the grid, ``xul``/``yul`` for the
x- and y-coordinates of the upper-left corner of the grid
(deprecated), ``rotation`` for the grid rotation (default 0.0),
``proj4_str`` for a PROJ string, and ``start_datetime`` for
model start date (default "1-1-1970").
"""
def __init__(
self,
modelname="modflowtest",
namefile_ext="nam",
exe_name="mf2k.exe",
model_ws=None,
structured=True,
verbose=False,
**kwargs,
):
"""Initialize BaseModel."""
super().__init__()
self.__name = modelname
self.namefile_ext = namefile_ext or ""
self._namefile = self.__name + "." + self.namefile_ext
self._packagelist = []
self.heading = ""
self.exe_name = exe_name
self._verbose = verbose
self.external_path = None
self.external_extension = "ref"
if model_ws is None:
model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
except:
print(
f"\n{model_ws} not valid, "
f"workspace-folder was changed to {os.getcwd()}\n"
)
model_ws = os.getcwd()
self._model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ""
# check for reference info in kwargs
# we are just carrying these until a dis package is added
xll = kwargs.pop("xll", None)
yll = kwargs.pop("yll", None)
self._xul = kwargs.pop("xul", None)
self._yul = kwargs.pop("yul", None)
self._rotation = kwargs.pop("rotation", 0.0)
self._proj4_str = kwargs.pop("proj4_str", None)
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
# build model discretization objects
self._modelgrid = Grid(
proj4=self._proj4_str,
xoff=xll,
yoff=yll,
angrot=self._rotation,
)
self._modeltime = None
# Model file information
self.__onunit__ = 10
# external option stuff
self.array_free_format = True
self.free_format_input = True
self.parameter_load = False
self.array_format = None
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external_output = []
self.package_units = []
self._next_ext_unit = None
# output files
self.output_fnames = []
self.output_units = []
self.output_binflag = []
self.output_packages = []
return
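    # Construction sketch (illustrative): concrete models such as
    # flopy.modflow.Modflow pass these arguments through; the names and
    # paths below are assumptions.
    #
    #   >>> m = flopy.modflow.Modflow("demo", exe_name="mf2005",
    #   ...                           model_ws="./demo_ws")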
@property
def modeltime(self):
raise NotImplementedError(
"must define modeltime in child class to use this base class"
)
@property
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
def packagelist(self):
return self._packagelist
@packagelist.setter
def packagelist(self, packagelist):
self._packagelist = packagelist
@property
def namefile(self):
return self._namefile
@namefile.setter
def namefile(self, namefile):
self._namefile = namefile
@property
def model_ws(self):
return self._model_ws
@model_ws.setter
def model_ws(self, model_ws):
self._model_ws = model_ws
@property
def exename(self):
return self._exename
@exename.setter
def exename(self, exename):
self._exename = exename
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, verbose):
self._verbose = verbose
@property
def laytyp(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").laytyp.array
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").laycon.array
if self.get_package("UPW") is not None:
return self.get_package("UPW").laytyp.array
return None
@property
def hdry(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").hdry
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").hdry
if self.get_package("UPW") is not None:
return self.get_package("UPW").hdry
return None
@property
def hnoflo(self):
try:
bas6 = self.get_package("BAS6")
return bas6.hnoflo
except AttributeError:
return None
@property
def laycbd(self):
try:
dis = self.get_package("DIS")
return dis.laycbd.array
except AttributeError:
return None
# we don't need these - no need for controlled access to array_free_format
# def set_free_format(self, value=True):
# """
# Set the free format flag for the model instance
#
# Parameters
# ----------
# value : bool
# Boolean value to set free format flag for model. (default is True)
#
# Returns
# -------
#
# """
# if not isinstance(value, bool):
# print('Error: set_free_format passed value must be a boolean')
# return False
# self.array_free_format = value
#
# def get_free_format(self):
# """
# Return the free format flag for the model
#
# Returns
# -------
# out : bool
# Free format flag for the model
#
# """
# return self.array_free_format
def next_unit(self, i=None):
if i is not None:
self.__onunit__ = i - 1
else:
self.__onunit__ += 1
return self.__onunit__
def next_ext_unit(self):
"""
Function to encapsulate next_ext_unit attribute
"""
next_unit = self._next_ext_unit + 1
self._next_ext_unit += 1
return next_unit
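    # Illustrative sketch (the counter value is an assumption, and the
    # underscore attribute is private): external-file unit numbers increase
    # monotonically so they never collide.
    #
    #   >>> m._next_ext_unit = 1000
    #   >>> m.next_ext_unit()
    #   1001
    #   >>> m.next_ext_unit()
    #   1002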
def export(self, f, **kwargs):
"""
Method to export a model to netcdf or shapefile based on the
extension of the file name (.shp for shapefile, .nc for netcdf)
Parameters
----------
f : str
filename
kwargs : keyword arguments
modelgrid : flopy.discretization.Grid instance
user supplied modelgrid which can be used for exporting
in lieu of the modelgrid associated with the model object
Returns
-------
None or Netcdf object
"""
from .export import utils
return utils.model_export(f, self, **kwargs)
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
"""
for idx, u in enumerate(p.unit_number):
if u != 0:
if u in self.package_units or u in self.external_units:
try:
pn = p.name[idx]
except:
pn = p.name
if self.verbose:
print(
f"\nWARNING:\n unit {u} of package {pn} already in use."
)
self.package_units.append(u)
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif isinstance(p, type(pp)):
if self.verbose:
print(
"\nWARNING:\n Two packages of the same type, "
f"Replacing existing '{p.name[0]}' package."
)
self.packagelist[i] = p
return
if self.verbose:
print("adding Package: ", p.name[0])
self.packagelist.append(p)
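    # Behavior sketch (illustrative; the WEL packages are hypothetical):
    # adding a second package of a non-duplicable type replaces the first.
    #
    #   >>> wel1 = flopy.modflow.ModflowWel(m)   # registers itself with m
    #   >>> wel2 = flopy.modflow.ModflowWel(m)   # replaces wel1 in packagelist
    #   >>> m.get_package("WEL") is wel2
    #   True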
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
"""
for i, pp in enumerate(self.packagelist):
if pname.upper() in pp.name:
if self.verbose:
print("removing Package: ", pp.name)
# Remove the package object from the model's packagelist
p = self.packagelist.pop(i)
# Remove the package unit number from the list of package
# units stored with the model
for iu in p.unit_number:
if iu in self.package_units:
self.package_units.remove(iu)
return
raise StopIteration(
"Package name " + pname + " not found in Package list"
)
def __getattr__(self, item):
"""
__getattr__ - syntactic sugar
Parameters
----------
item : str
3 character package name (case insensitive) or "sr" to access
the SpatialReference instance of the ModflowDis object
Returns
-------
sr : SpatialReference instance
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
Note
----
if self.dis is not None, then the spatial reference instance is updated
using self.dis.delr, self.dis.delc, and self.dis.lenuni before being
returned
"""
if item == "output_packages" or not hasattr(self, "output_packages"):
raise AttributeError(item)
if item == "tr":
if self.dis is not None:
return self.dis.tr
else:
return None
if item == "nper":
if self.dis is not None:
return self.dis.nper
else:
return 0
if item == "start_datetime":
if self.dis is not None:
return self.dis.start_datetime
else:
return None
# return self.get_package(item)
# to avoid infinite recursion
if item == "_packagelist" or item == "packagelist":
raise AttributeError(item)
pckg = self.get_package(item)
if pckg is not None or item in self.mfnam_packages:
return pckg
if item == "modelgrid":
return
raise AttributeError(item)
def get_ext_dict_attr(
self, ext_unit_dict=None, unit=None, filetype=None, pop_key=True
):
iu = None
fname = None
if ext_unit_dict is not None:
for key, value in ext_unit_dict.items():
if key == unit:
iu = key
fname = os.path.basename(value.filename)
break
elif value.filetype == filetype:
iu = key
fname = os.path.basename(value.filename)
if pop_key:
self.add_pop_key_list(iu)
break
return iu, fname
def _output_msg(self, i, add=True):
if add:
txt1 = "Adding"
txt2 = "to"
else:
txt1 = "Removing"
txt2 = "from"
print(
f"{txt1} {self.output_fnames[i]} (unit={self.output_units[i]}) "
f"{txt2} the output list."
)
def add_output_file(
self, unit, fname=None, extension="cbc", binflag=True, package=None
):
"""
Add an ascii or binary output file for a package
Parameters
----------
unit : int
unit number of external array
fname : str
filename of external array. (default is None)
extension : str
extension to use for the cell-by-cell file. Only used if fname
is None. (default is cbc)
binflag : bool
boolean flag indicating if the output file is a binary file.
Default is True
package : str
string that defines the package the output file is attached to.
Default is None
"""
add_cbc = False
if unit > 0:
add_cbc = True
# determine if the file is in external_units
if abs(unit) in self.external_units:
idx = self.external_units.index(abs(unit))
if fname is None:
fname = os.path.basename(self.external_fnames[idx])
binflag = self.external_binflag[idx]
self.remove_external(unit=abs(unit))
# determine if the unit exists in the output data
if abs(unit) in self.output_units:
add_cbc = False
idx = self.output_units.index(abs(unit))
# determine if binflag has changed
if binflag is not self.output_binflag[idx]:
add_cbc = True
if add_cbc:
self.remove_output(unit=abs(unit))
else:
if package is not None:
self.output_packages[idx].append(package)
if add_cbc:
if fname is None:
fname = f"{self.name}.{extension}"
# check if this file name exists for a different unit number
if fname in self.output_fnames:
idx = self.output_fnames.index(fname)
iut = self.output_units[idx]
if iut != unit:
# include unit number in fname if package has
# not been passed
if package is None:
fname = f"{self.name}.{unit}.{extension}"
# include package name in fname
else:
fname = f"{self.name}.{package}.{extension}"
else:
fname = os.path.basename(fname)
self.add_output(fname, unit, binflag=binflag, package=package)
return
def add_output(self, fname, unit, binflag=False, package=None):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.output_fnames:
if self.verbose:
print(
"BaseModel.add_output() warning: "
f"replacing existing filename {fname}"
)
idx = self.output_fnames.index(fname)
if self.verbose:
self._output_msg(idx, add=False)
self.output_fnames.pop(idx)
self.output_units.pop(idx)
self.output_binflag.pop(idx)
self.output_packages.pop(idx)
self.output_fnames.append(fname)
self.output_units.append(unit)
self.output_binflag.append(binflag)
if package is not None:
self.output_packages.append([package])
else:
self.output_packages.append([])
if self.verbose:
self._output_msg(-1, add=True)
return
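    # Bookkeeping sketch (illustrative; file name and units are assumptions,
    # starting from an empty output list): re-adding an existing file name
    # replaces the previous entry instead of duplicating it.
    #
    #   >>> m.add_output("demo.cbc", 53, binflag=True, package="LPF")
    #   >>> m.add_output("demo.cbc", 54, binflag=True)  # replaces unit 53
    #   >>> m.output_units
    #   [54]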
def remove_output(self, fname=None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
        plist = []
        if fname is not None:
            for i, e in enumerate(self.output_fnames):
                if fname in e:
                    plist.append(i)
        elif unit is not None:
            for i, u in enumerate(self.output_units):
                if u == unit:
                    plist.append(i)
        else:
            msg = " either fname or unit must be passed to remove_output()"
            raise Exception(msg)
        # pop matches from the end so earlier indices remain valid
        for i in sorted(plist, reverse=True):
            if self.verbose:
                self._output_msg(i, add=False)
            self.output_fnames.pop(i)
            self.output_units.pop(i)
            self.output_binflag.pop(i)
            self.output_packages.pop(i)
return
def get_output(self, fname=None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
return self.output_units[i]
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
return self.output_fnames[i]
return None
else:
msg = " either fname or unit must be passed to get_output()"
raise Exception(msg)
return
def set_output_attribute(self, fname=None, unit=None, attr=None):
"""
Set a variable in an output file from the model by specifying either
the file name or the unit number and a dictionary with attributes
to change.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
msg = (
" either fname or unit must be passed "
"to set_output_attribute()"
)
raise Exception(msg)
if attr is not None:
if idx is not None:
                for key, value in attr.items():
if key == "binflag":
self.output_binflag[idx] = value
elif key == "fname":
self.output_fnames[idx] = value
elif key == "unit":
self.output_units[idx] = value
return
def get_output_attribute(self, fname=None, unit=None, attr=None):
"""
        Get an attribute for an output file from the model by specifying either
the file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
" either fname or unit must be passed "
"to set_output_attribute()"
)
v = None
if attr is not None:
if idx is not None:
if attr == "binflag":
v = self.output_binflag[idx]
elif attr == "fname":
v = self.output_fnames[idx]
elif attr == "unit":
v = self.output_units[idx]
return v
def add_external(self, fname, unit, binflag=False, output=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.external_fnames:
if self.verbose:
print(
"BaseModel.add_external() warning: "
f"replacing existing filename {fname}"
)
idx = self.external_fnames.index(fname)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
if unit in self.external_units:
if self.verbose:
msg = f"BaseModel.add_external() warning: replacing existing unit {unit}"
print(msg)
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
self.external_output.append(output)
return
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
"""
plist = []
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
plist.append(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
plist.append(i)
else:
msg = " either fname or unit must be passed to remove_external()"
raise Exception(msg)
# remove external file
j = 0
for i in plist:
ipos = i - j
self.external_fnames.pop(ipos)
self.external_units.pop(ipos)
self.external_binflag.pop(ipos)
self.external_output.pop(ipos)
j += 1
return
def add_existing_package(
self, filename, ptype=None, copy_to_model_ws=True
):
"""
Add an existing package to a model instance.
Parameters
----------
filename : str
the name of the file to add as a package
ptype : optional
the model package type (e.g. "lpf", "wel", etc). If None,
then the file extension of the filename arg is used
copy_to_model_ws : bool
flag to copy the package file into the model_ws directory.
Returns
-------
None
"""
if ptype is None:
ptype = filename.split(".")[-1]
ptype = str(ptype).upper()
# for pak in self.packagelist:
# if ptype in pak.name:
# print("BaseModel.add_existing_package() warning: " +\
# "replacing existing package {0}".format(ptype))
class Obj:
pass
fake_package = Obj()
fake_package.write_file = lambda: None
fake_package.name = [ptype]
fake_package.extension = [filename.split(".")[-1]]
fake_package.unit_number = [self.next_ext_unit()]
if copy_to_model_ws:
base_filename = os.path.split(filename)[-1]
fake_package.file_name = [base_filename]
shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
else:
fake_package.file_name = [filename]
fake_package.allowDuplicates = True
self.add_package(fake_package)
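    # Usage sketch (illustrative; the file name is an assumption): attach a
    # hand-edited WEL file without parsing it into a package object.
    #
    #   >>> m.add_existing_package("external/demo.wel", ptype="wel")
    #   >>> m.has_package("WEL")
    #   True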
def get_name_file_entries(self):
"""
Get a string representation of the name file.
Parameters
----------
"""
lines = []
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s = f"{p.name[i]:14s} {p.unit_number[i]:5d} {p.file_name[i]}"
lines.append(s)
return "\n".join(lines) + "\n"
def has_package(self, name):
"""
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
True if package name exists, otherwise False if not found.
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for p in self.packagelist:
for pn in p.name:
if pn.upper() == name:
return True
return False
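    # Illustration (not in the original source; the package set is assumed):
    #
    #   >>> m.has_package("dis")   # lookup is case-insensitive
    #   True
    #   >>> m.has_package("riv")
    #   False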
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for pp in self.packagelist:
if pp.name[0].upper() == name:
return pp
return None
def set_version(self, version):
self.version = version.lower()
# check that this is a valid model version
if self.version not in list(self.version_types.keys()):
err = (
f"Error: Unsupported model version ({self.version}). "
"Valid model versions are:"
)
for v in list(self.version_types.keys()):
err += f" {v}"
raise Exception(err)
# set namefile heading
self.heading = (
f"# Name file for {self.version_types[self.version]}, "
f"generated by Flopy version {__version__}."
)
# set heading for each package
for p in self.get_package_list():
pak = self.get_package(p)
if hasattr(pak, "heading"):
pak._generate_heading()
return None
def change_model_ws(self, new_pth=None, reset_external=False):
"""
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
            the present working directory).
        reset_external : bool
            If True, external file paths are rewritten relative to the
            new workspace. (default is False)
        Returns
        -------
        None
        """
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
print(f"\ncreating model workspace...\n {new_pth}")
os.makedirs(new_pth)
except:
raise OSError(f"{new_pth} not valid, workspace-folder")
# line = '\n{} not valid, workspace-folder '.format(new_pth) + \
# 'was changed to {}\n'.format(os.getcwd())
# print(line)
# new_pth = os.getcwd()
# --reset the model workspace
old_pth = self._model_ws
self._model_ws = new_pth
if self.verbose:
print(f"\nchanging model workspace...\n {new_pth}")
# reset the paths for each package
for pp in self.packagelist:
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
# create the external path (if needed)
if (
hasattr(self, "external_path")
and self.external_path is not None
and not os.path.exists(
os.path.join(self._model_ws, self.external_path)
)
):
pth = os.path.join(self._model_ws, self.external_path)
os.makedirs(pth)
if reset_external:
self._reset_external(pth, old_pth)
elif reset_external:
self._reset_external(self._model_ws, old_pth)
return None
def _reset_external(self, pth, old_pth):
new_ext_fnames = []
for ext_file, output in zip(
self.external_fnames, self.external_output
):
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
# this is a wicked mess
if output:
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
new_ext_file = ext_file
else:
# fpth = os.path.abspath(os.path.join(old_pth, ext_file))
# new_ext_file = os.path.relpath(fpth, os.path.abspath(pth))
fdir = os.path.dirname(ext_file)
if fdir == "":
fpth = os.path.abspath(os.path.join(old_pth, ext_file))
else:
fpth = ext_file
ao = os.path.abspath(os.path.dirname(fpth))
ep = os.path.abspath(pth)
relp = os.path.relpath(ao, ep)
new_ext_file = os.path.join(relp, os.path.basename(ext_file))
new_ext_fnames.append(new_ext_file)
self.external_fnames = new_ext_fnames
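    # Worked example of the path rewrite above (all paths are assumptions):
    # a non-output file "top.ref" recorded relative to the old workspace
    # "/proj/old_ws" is re-expressed relative to the new one "/proj/new_ws":
    #
    #   >>> import os
    #   >>> os.path.join(
    #   ...     os.path.relpath("/proj/old_ws", "/proj/new_ws"), "top.ref"
    #   ... )
    #   '../old_ws/top.ref'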
@property
def model_ws(self):
return copy.deepcopy(self._model_ws)
def _set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = str(value)
self.namefile = self.__name + "." + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + "." + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
def __setattr__(self, key, value):
if key == "free_format_input":
# if self.bas6 is not None:
# self.bas6.ifrefm = value
super().__setattr__(key, value)
elif key == "name":
self._set_name(value)
elif key == "model_ws":
self.change_model_ws(value)
elif key == "sr" and value.__class__.__name__ == "SpatialReference":
warnings.warn(
"SpatialReference has been deprecated.",
category=DeprecationWarning,
)
if self.dis is not None:
self.dis.sr = value
else:
raise Exception(
"cannot set SpatialReference - ModflowDis not found"
)
elif key == "tr":
assert isinstance(
value, discretization.reference.TemporalReference
)
if self.dis is not None:
self.dis.tr = value
else:
raise Exception(
"cannot set TemporalReference - ModflowDis not found"
)
elif key == "start_datetime":
if self.dis is not None:
self.dis.start_datetime = value
self.tr.start_datetime = value
else:
raise Exception(
"cannot set start_datetime - ModflowDis not found"
)
else:
super().__setattr__(key, value)
def run_model(
self,
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
):
"""
This method will run the model using subprocess.Popen.
Parameters
----------
silent : boolean
            Suppress echo of run information to the screen
            (default is False).
        pause : boolean, optional
            Pause upon completion (default is False).
        report : boolean, optional
            Save stdout lines to a list (buff) which is returned
            by the method. (default is False).
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
return run_model(
self.exe_name,
self.namefile,
model_ws=self.model_ws,
silent=silent,
pause=pause,
report=report,
normal_msg=normal_msg,
)
def load_results(self):
print("load_results not implemented")
return None
def write_input(self, SelPackList=False, check=False):
"""
Write the input.
Parameters
----------
SelPackList : False or list of packages
"""
if check:
# run check prior to writing input
self.check(f=f"{self.name}.chk", verbose=self.verbose, level=1)
# reset the model to free_format if parameter substitution was
# performed on a model load
if self.parameter_load and not self.free_format_input:
if self.verbose:
print(
"\nResetting free_format_input to True to "
"preserve the precision of the parameter data."
)
self.free_format_input = True
if self.verbose:
print("\nWriting packages:")
        if SelPackList is False:
for p in self.packagelist:
if self.verbose:
print(" Package: ", p.name[0])
# prevent individual package checks from running after
# model-level package check above
# otherwise checks are run twice
# or the model level check procedure would have to be split up
# or each package would need a check argument,
# or default for package level check would have to be False
try:
p.write_file(check=False)
except TypeError:
p.write_file()
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(" Package: ", p.name[0])
try:
p.write_file(check=False)
except TypeError:
p.write_file()
break
if self.verbose:
print(" ")
# write name file
self.write_name_file()
# os.chdir(org_dir)
return
def write_name_file(self):
"""
        Every model needs its own write_name_file method.
        """
        raise Exception(
            "IMPLEMENTATION ERROR: write_name_file must be overloaded"
)
def set_model_units(self):
"""
Every model needs its own set_model_units method
"""
raise Exception(
"IMPLEMENTATION ERROR: set_model_units must be overloaded"
)
@property
def name(self):
"""
Get model name
Returns
-------
name : str
name of model
"""
return copy.deepcopy(self.__name)
def add_pop_key_list(self, key):
"""
        Add an external file unit number to a list that will be used to remove
model output (typically binary) files from ext_unit_dict.
Parameters
----------
key : int
file unit number
Returns
-------
Examples
--------
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
chk = utils.check(self, f=f, verbose=verbose, level=level)
# check for unit number conflicts
package_units = {}
duplicate_units = {}
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] != 0:
                    if p.unit_number[i] in package_units.values():
                        duplicate_units[p.name[i]] = p.unit_number[i]
                        otherpackage = [
                            k
                            for k, v in package_units.items()
                            if v == p.unit_number[i]
                        ][0]
                        duplicate_units[otherpackage] = p.unit_number[i]
                    # record this unit so later packages are checked against it
                    package_units[p.name[i]] = p.unit_number[i]
if len(duplicate_units) > 0:
for k, v in duplicate_units.items():
chk._add_to_summary(
"Error", package=k, value=v, desc="unit number conflict"
)
else:
chk.passed.append("Unit number conflicts")
return self._check(chk, level)
def plot(self, SelPackList=None, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
model input data
Parameters
----------
SelPackList : bool or list
            List of packages to plot. If SelPackList=None all packages
are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
                MODFLOW zero-based layer number to return. If None, then
                all layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return.
(default is zero)
key : str
MfList dictionary key. (default is None)
Returns
----------
axes : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis are returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_model_helper(
self, SelPackList=SelPackList, **kwargs
)
return axes
def to_shapefile(self, filename, package_names=None, **kwargs):
"""
Wrapper function for writing a shapefile for the model grid. If
package_names is not None, then search through the requested packages
looking for arrays that can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
        >>> m.to_shapefile('model.shp', package_names=['dis', 'lpf'])
"""
warnings.warn("to_shapefile() is deprecated. use .export()")
self.export(filename, package_names=package_names)
return
def run_model(
exe_name,
namefile,
model_ws="./",
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
use_async=False,
cargs=None,
):
"""
    This function will run the model using subprocess.Popen. If use_async
    is True, it communicates with the model's stdout asynchronously and
    reports progress to the screen with timestamps.
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
Namefile of model to run. The namefile must be the
filename of the namefile without the path. Namefile can be None
to allow programs that do not require a control file (name file)
to be passed as a command line argument.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
silent : boolean
        Suppress echo of run information to the screen (default is False).
    pause : boolean, optional
        Pause upon completion (default is False).
    report : boolean, optional
        Save stdout lines to a list (buff) which is returned
        by the method. (default is False).
normal_msg : str or list
Normal termination message used to determine if the
run terminated normally. More than one message can be provided using
a list. (Default is 'normal termination')
    use_async : boolean
        If True, read model stdout asynchronously and report progress with
        timestamps. Useful for models that take a long time to run; not
        useful for models that finish quickly. (default is False)
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
success = False
buff = []
# convert normal_msg to a list of lower case str for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg]
for idx, s in enumerate(normal_msg):
normal_msg[idx] = s.lower()
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
import platform
        if platform.system() == "Windows":
if not exe_name.lower().endswith(".exe"):
exe = which(exe_name + ".exe")
elif exe_name.lower().endswith(".exe"):
exe = which(exe_name[:-4])
if exe is None:
raise Exception(
f"The program {exe_name} does not exist or is not executable."
)
else:
if not silent:
print(
f"FloPy is using the following executable to run the model: {exe}"
)
if namefile is not None:
if not os.path.isfile(os.path.join(model_ws, namefile)):
raise Exception(
f"The namefile for this model does not exists: {namefile}"
)
# simple little function for the thread to target
def q_output(output, q):
for line in iter(output.readline, b""):
q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe_name]
if namefile is not None:
argv.append(namefile)
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
if not use_async:
while True:
line = proc.stdout.readline().decode("utf-8")
if line == "" and proc.poll() is not None:
break
if line:
for msg in normal_msg:
if msg in line.lower():
success = True
break
line = line.rstrip("\r\n")
if not silent:
print(line)
if report:
buff.append(line)
else:
break
return success, buff
# some tricks for the async stdout reading
q = Queue.Queue()
thread = threading.Thread(target=q_output, args=(proc.stdout, q))
thread.daemon = True
thread.start()
failed_words = ["fail", "error"]
last = datetime.now()
lastsec = 0.0
while True:
try:
line = q.get_nowait()
except Queue.Empty:
pass
else:
if line == "":
break
line = line.decode().lower().strip()
if line != "":
now = datetime.now()
dt = now - last
tsecs = dt.total_seconds() - lastsec
line = f"(elapsed:{tsecs})-->{line}"
lastsec = tsecs + lastsec
buff.append(line)
if not silent:
print(line)
for fword in failed_words:
if fword in line:
success = False
break
if proc.poll() is not None:
break
proc.wait()
thread.join(timeout=1)
buff.extend(proc.stdout.readlines())
proc.stdout.close()
for line in buff:
for msg in normal_msg:
if msg in line.lower():
print("success")
success = True
break
if pause:
input("Press Enter to continue...")
return success, buff
|
check
|
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
|
"""
mbase module
This module contains the base model class from which
all of the other models inherit.
"""
import abc
import os
import shutil
import threading
import warnings
import queue as Queue
from datetime import datetime
from shutil import which
from subprocess import Popen, PIPE, STDOUT
import copy
import numpy as np
from flopy import utils, discretization
from .version import __version__
from .discretization.grid import Grid
## Global variables
# Multiplier for individual array elements in integer and real arrays read by
# MODFLOW's U2DREL, U1DREL and U2DINT.
iconst = 1
# Printout flag. If >= 0 then array values read are printed in listing file.
iprn = -1
# external exceptions for users
class PackageLoadException(Exception):
"""
FloPy package load exception.
"""
def __init__(self, error, location=""):
"""Initialize exception."""
self.message = error
super().__init__(f"{error} ({location})")
class FileDataEntry:
def __init__(self, fname, unit, binflag=False, output=False, package=None):
self.fname = fname
self.unit = unit
self.binflag = binflag
self.output = output
self.package = package
class FileData:
def __init__(self):
self.file_data = []
return
def add_file(self, fname, unit, binflag=False, output=False, package=None):
ipop = []
        for idx, file_data in enumerate(self.file_data):
            if file_data.fname == fname or file_data.unit == unit:
                ipop.append(idx)
        # drop clashing entries (same name or unit) before adding the new one
        for idx in sorted(ipop, reverse=True):
            self.file_data.pop(idx)
self.file_data.append(
FileDataEntry(
fname, unit, binflag=binflag, output=output, package=package
)
)
return
class ModelInterface:
def __init__(self):
self._mg_resync = True
self._modelgrid = None
def update_modelgrid(self):
if self._modelgrid is not None:
self._modelgrid = Grid(
proj4=self._modelgrid.proj4,
xoff=self._modelgrid.xoffset,
yoff=self._modelgrid.yoffset,
angrot=self._modelgrid.angrot,
)
self._mg_resync = True
@property
@abc.abstractmethod
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
@abc.abstractmethod
def packagelist(self):
raise NotImplementedError(
"must define packagelist in child class to use this base class"
)
@property
@abc.abstractmethod
def namefile(self):
raise NotImplementedError(
"must define namefile in child class to use this base class"
)
@property
@abc.abstractmethod
def model_ws(self):
raise NotImplementedError(
"must define model_ws in child class to use this base class"
)
@property
@abc.abstractmethod
def exename(self):
raise NotImplementedError(
"must define exename in child class to use this base class"
)
@property
@abc.abstractmethod
def version(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@property
@abc.abstractmethod
def solver_tols(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@abc.abstractmethod
def export(self, f, **kwargs):
raise NotImplementedError(
"must define export in child class to use this base class"
)
@property
@abc.abstractmethod
def laytyp(self):
raise NotImplementedError(
"must define laytyp in child class to use this base class"
)
@property
@abc.abstractmethod
def hdry(self):
raise NotImplementedError(
"must define hdry in child class to use this base class"
)
@property
@abc.abstractmethod
def hnoflo(self):
raise NotImplementedError(
"must define hnoflo in child class to use this base class"
)
@property
@abc.abstractmethod
def laycbd(self):
raise NotImplementedError(
"must define laycbd in child class to use this base class"
)
@property
@abc.abstractmethod
def verbose(self):
raise NotImplementedError(
"must define verbose in child class to use this base class"
)
@abc.abstractmethod
def check(self, f=None, verbose=True, level=1):
raise NotImplementedError(
"must define check in child class to use this base class"
)
def get_package_list(self, ftype=None):
"""
Get a list of all the package names.
Parameters
----------
ftype : str
Type of package, 'RIV', 'LPF', etc.
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
"""
val = []
for pp in self.packagelist:
if ftype is None:
val.append(pp.name[0].upper())
            elif pp.package_type.lower() == ftype.lower():
val.append(pp.name[0].upper())
return val
def _check(self, chk, level=1):
"""
Check model data for common errors.
Parameters
----------
        chk : check object
            Model-level check instance to which package-level results and
            model-level summary entries are added.
        level : int
            Check method analysis level. If level=0, summary checks are
            performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
results = {}
for p in self.packagelist:
if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
results[p.name[0]] = p.check(
f=None,
verbose=False,
level=level - 1,
checktype=chk.__class__,
)
# model level checks
# solver check
if self.version in chk.solver_packages.keys():
solvers = set(chk.solver_packages[self.version]).intersection(
set(self.get_package_list())
)
if not solvers:
chk._add_to_summary(
"Error", desc="\r No solver package", package="model"
)
elif len(list(solvers)) > 1:
for s in solvers:
chk._add_to_summary(
"Error",
desc="\r Multiple solver packages",
package=s,
)
else:
chk.passed.append("Compatible solver package")
# add package check results to model level check summary
for r in results.values():
if (
r is not None and r.summary_array is not None
): # currently SFR doesn't have one
chk.summary_array = np.append(
chk.summary_array, r.summary_array
).view(np.recarray)
chk.passed += [
f"{r.package.name[0]} package: {psd}" for psd in r.passed
]
chk.summarize()
return chk
class BaseModel(ModelInterface):
"""
MODFLOW-based models base class.
Parameters
----------
modelname : str, default "modflowtest"
Name of the model, which is also used for model file names.
namefile_ext : str, default "nam"
Name file extension, without "."
exe_name : str, default "mf2k.exe"
Name of the modflow executable.
model_ws : str, optional
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
structured : bool, default True
Specify if model grid is structured (default) or unstructured.
verbose : bool, default False
Print additional information to the screen.
**kwargs : dict, optional
Used to define: ``xll``/``yll`` for the x- and y-coordinates of
the lower-left corner of the grid, ``xul``/``yul`` for the
x- and y-coordinates of the upper-left corner of the grid
(deprecated), ``rotation`` for the grid rotation (default 0.0),
``proj4_str`` for a PROJ string, and ``start_datetime`` for
model start date (default "1-1-1970").
"""
def __init__(
self,
modelname="modflowtest",
namefile_ext="nam",
exe_name="mf2k.exe",
model_ws=None,
structured=True,
verbose=False,
**kwargs,
):
"""Initialize BaseModel."""
super().__init__()
self.__name = modelname
self.namefile_ext = namefile_ext or ""
self._namefile = self.__name + "." + self.namefile_ext
self._packagelist = []
self.heading = ""
self.exe_name = exe_name
self._verbose = verbose
self.external_path = None
self.external_extension = "ref"
if model_ws is None:
model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
except OSError:
print(
f"\n{model_ws} not valid, "
f"workspace-folder was changed to {os.getcwd()}\n"
)
model_ws = os.getcwd()
self._model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ""
# check for reference info in kwargs
# we are just carrying these until a dis package is added
xll = kwargs.pop("xll", None)
yll = kwargs.pop("yll", None)
self._xul = kwargs.pop("xul", None)
self._yul = kwargs.pop("yul", None)
self._rotation = kwargs.pop("rotation", 0.0)
self._proj4_str = kwargs.pop("proj4_str", None)
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
# build model discretization objects
self._modelgrid = Grid(
proj4=self._proj4_str,
xoff=xll,
yoff=yll,
angrot=self._rotation,
)
self._modeltime = None
# Model file information
self.__onunit__ = 10
# external option stuff
self.array_free_format = True
self.free_format_input = True
self.parameter_load = False
self.array_format = None
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external_output = []
self.package_units = []
self._next_ext_unit = None
# output files
self.output_fnames = []
self.output_units = []
self.output_binflag = []
self.output_packages = []
return
@property
def modeltime(self):
raise NotImplementedError(
"must define modeltime in child class to use this base class"
)
@property
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
def packagelist(self):
return self._packagelist
@packagelist.setter
def packagelist(self, packagelist):
self._packagelist = packagelist
@property
def namefile(self):
return self._namefile
@namefile.setter
def namefile(self, namefile):
self._namefile = namefile
@property
def model_ws(self):
return self._model_ws
@model_ws.setter
def model_ws(self, model_ws):
self._model_ws = model_ws
@property
def exename(self):
return self._exename
@exename.setter
def exename(self, exename):
self._exename = exename
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, verbose):
self._verbose = verbose
@property
def laytyp(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").laytyp.array
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").laycon.array
if self.get_package("UPW") is not None:
return self.get_package("UPW").laytyp.array
return None
@property
def hdry(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").hdry
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").hdry
if self.get_package("UPW") is not None:
return self.get_package("UPW").hdry
return None
@property
def hnoflo(self):
try:
bas6 = self.get_package("BAS6")
return bas6.hnoflo
except AttributeError:
return None
@property
def laycbd(self):
try:
dis = self.get_package("DIS")
return dis.laycbd.array
except AttributeError:
return None
# we don't need these - no need for controlled access to array_free_format
# def set_free_format(self, value=True):
# """
# Set the free format flag for the model instance
#
# Parameters
# ----------
# value : bool
# Boolean value to set free format flag for model. (default is True)
#
# Returns
# -------
#
# """
# if not isinstance(value, bool):
# print('Error: set_free_format passed value must be a boolean')
# return False
# self.array_free_format = value
#
# def get_free_format(self):
# """
# Return the free format flag for the model
#
# Returns
# -------
# out : bool
# Free format flag for the model
#
# """
# return self.array_free_format
def next_unit(self, i=None):
if i is not None:
self.__onunit__ = i - 1
else:
self.__onunit__ += 1
return self.__onunit__
def next_ext_unit(self):
"""
Get the next available external file unit number and advance
the internal counter.
"""
next_unit = self._next_ext_unit + 1
self._next_ext_unit += 1
return next_unit
def export(self, f, **kwargs):
"""
Method to export a model to netcdf or shapefile based on the
extension of the file name (.shp for shapefile, .nc for netcdf)
Parameters
----------
f : str
filename
kwargs : keyword arguments
modelgrid : flopy.discretization.Grid instance
user supplied modelgrid which can be used for exporting
in lieu of the modelgrid associated with the model object
Returns
-------
None or Netcdf object
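Examples
--------
A minimal sketch; the file names are hypothetical, and the export
format is chosen from the extension:

>>> m.export('model.shp')  # shapefile export
>>> m.export('model.nc')   # netcdf export (returns a Netcdf object)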
"""
from .export import utils
return utils.model_export(f, self, **kwargs)
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
"""
for idx, u in enumerate(p.unit_number):
if u != 0:
if u in self.package_units or u in self.external_units:
try:
pn = p.name[idx]
except:
pn = p.name
if self.verbose:
print(
f"\nWARNING:\n unit {u} of package {pn} already in use."
)
self.package_units.append(u)
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif isinstance(p, type(pp)):
if self.verbose:
print(
"\nWARNING:\n Two packages of the same type, "
f"Replacing existing '{p.name[0]}' package."
)
self.packagelist[i] = p
return
if self.verbose:
print("adding Package: ", p.name[0])
self.packagelist.append(p)
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
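Examples
--------
Hypothetical usage, assuming the model contains a WEL package:

>>> m.remove_package('WEL')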
"""
for i, pp in enumerate(self.packagelist):
if pname.upper() in pp.name:
if self.verbose:
print("removing Package: ", pp.name)
# Remove the package object from the model's packagelist
p = self.packagelist.pop(i)
# Remove the package unit number from the list of package
# units stored with the model
for iu in p.unit_number:
if iu in self.package_units:
self.package_units.remove(iu)
return
raise StopIteration(
"Package name " + pname + " not found in Package list"
)
def __getattr__(self, item):
"""
__getattr__ - syntactic sugar
Parameters
----------
item : str
3 character package name (case insensitive) or "sr" to access
the SpatialReference instance of the ModflowDis object
Returns
-------
sr : SpatialReference instance
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
Note
----
if self.dis is not None, then the spatial reference instance is updated
using self.dis.delr, self.dis.delc, and self.dis.lenuni before being
returned
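Examples
--------
Illustrative attribute-style access; the package names are
hypothetical:

>>> wel = m.wel    # equivalent to m.get_package('WEL')
>>> nper = m.nper  # taken from the dis package, 0 if absent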
"""
if item == "output_packages" or not hasattr(self, "output_packages"):
raise AttributeError(item)
if item == "tr":
if self.dis is not None:
return self.dis.tr
else:
return None
if item == "nper":
if self.dis is not None:
return self.dis.nper
else:
return 0
if item == "start_datetime":
if self.dis is not None:
return self.dis.start_datetime
else:
return None
# return self.get_package(item)
# to avoid infinite recursion
if item == "_packagelist" or item == "packagelist":
raise AttributeError(item)
pckg = self.get_package(item)
if pckg is not None or item in self.mfnam_packages:
return pckg
if item == "modelgrid":
return
raise AttributeError(item)
def get_ext_dict_attr(
self, ext_unit_dict=None, unit=None, filetype=None, pop_key=True
):
iu = None
fname = None
if ext_unit_dict is not None:
for key, value in ext_unit_dict.items():
if key == unit:
iu = key
fname = os.path.basename(value.filename)
break
elif value.filetype == filetype:
iu = key
fname = os.path.basename(value.filename)
if pop_key:
self.add_pop_key_list(iu)
break
return iu, fname
def _output_msg(self, i, add=True):
if add:
txt1 = "Adding"
txt2 = "to"
else:
txt1 = "Removing"
txt2 = "from"
print(
f"{txt1} {self.output_fnames[i]} (unit={self.output_units[i]}) "
f"{txt2} the output list."
)
def add_output_file(
self, unit, fname=None, extension="cbc", binflag=True, package=None
):
"""
Add an ascii or binary output file for a package
Parameters
----------
unit : int
unit number of the output file
fname : str
filename of the output file. (default is None)
extension : str
extension to use for the output file. Only used if fname
is None. (default is cbc)
binflag : bool
boolean flag indicating if the output file is a binary file.
Default is True
package : str
string that defines the package the output file is attached to.
Default is None
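Examples
--------
A hypothetical binary cell-by-cell budget file registered on
unit 53 for an LPF package:

>>> m.add_output_file(53, extension='cbc', binflag=True, package='LPF')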
"""
add_cbc = False
if unit > 0:
add_cbc = True
# determine if the file is in external_units
if abs(unit) in self.external_units:
idx = self.external_units.index(abs(unit))
if fname is None:
fname = os.path.basename(self.external_fnames[idx])
binflag = self.external_binflag[idx]
self.remove_external(unit=abs(unit))
# determine if the unit exists in the output data
if abs(unit) in self.output_units:
add_cbc = False
idx = self.output_units.index(abs(unit))
# determine if binflag has changed
if binflag is not self.output_binflag[idx]:
add_cbc = True
if add_cbc:
self.remove_output(unit=abs(unit))
else:
if package is not None:
self.output_packages[idx].append(package)
if add_cbc:
if fname is None:
fname = f"{self.name}.{extension}"
# check if this file name exists for a different unit number
if fname in self.output_fnames:
idx = self.output_fnames.index(fname)
iut = self.output_units[idx]
if iut != unit:
# include unit number in fname if package has
# not been passed
if package is None:
fname = f"{self.name}.{unit}.{extension}"
# include package name in fname
else:
fname = f"{self.name}.{package}.{extension}"
else:
fname = os.path.basename(fname)
self.add_output(fname, unit, binflag=binflag, package=package)
return
def add_output(self, fname, unit, binflag=False, package=None):
"""
Register an output file so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This allows an outside
file package to refer to it.
Parameters
----------
fname : str
filename of the output file
unit : int
unit number of the output file
binflag : boolean
binary or not. (default is False)
package : str
name of the package the output file belongs to.
(default is None)
"""
if fname in self.output_fnames:
if self.verbose:
print(
"BaseModel.add_output() warning: "
f"replacing existing filename {fname}"
)
idx = self.output_fnames.index(fname)
if self.verbose:
self._output_msg(idx, add=False)
self.output_fnames.pop(idx)
self.output_units.pop(idx)
self.output_binflag.pop(idx)
self.output_packages.pop(idx)
self.output_fnames.append(fname)
self.output_units.append(unit)
self.output_binflag.append(binflag)
if package is not None:
self.output_packages.append([package])
else:
self.output_packages.append([])
if self.verbose:
self._output_msg(-1, add=True)
return
def remove_output(self, fname=None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
else:
msg = " either fname or unit must be passed to remove_output()"
raise Exception(msg)
return
def get_output(self, fname=None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
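Examples
--------
Hypothetical lookups; either the file name or the unit number
may be given:

>>> m.get_output(fname='model.cbc')  # returns the unit number
>>> m.get_output(unit=53)            # returns the file name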
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
return self.output_units[i]
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
return self.output_fnames[i]
return None
else:
msg = " either fname or unit must be passed to get_output()"
raise Exception(msg)
return
def set_output_attribute(self, fname=None, unit=None, attr=None):
"""
Set a variable in an output file from the model by specifying either
the file name or the unit number and a dictionary with attributes
to change.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
attr : dict
dictionary with keys 'binflag', 'fname', and/or 'unit' and the
new values to assign
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
msg = (
" either fname or unit must be passed "
"to set_output_attribute()"
)
raise Exception(msg)
if attr is not None:
if idx is not None:
for key, value in attr.items():
if key == "binflag":
self.output_binflag[idx] = value
elif key == "fname":
self.output_fnames[idx] = value
elif key == "unit":
self.output_units[idx] = value
return
def get_output_attribute(self, fname=None, unit=None, attr=None):
"""
Get an attribute for an output file from the model by specifying either
the file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
attr : str
name of the attribute to return: 'binflag', 'fname', or 'unit'
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
" either fname or unit must be passed "
"to set_output_attribute()"
)
v = None
if attr is not None:
if idx is not None:
if attr == "binflag":
v = self.output_binflag[idx]
elif attr == "fname":
v = self.output_fnames[idx]
elif attr == "unit":
v = self.output_units[idx]
return v
def add_external(self, fname, unit, binflag=False, output=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
output : boolean
flag indicating the file is an output file written by the model.
(default is False)
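Examples
--------
Hypothetical registrations; file names and unit numbers are
placeholders:

>>> m.add_external('recharge.ref', 201)
>>> m.add_external('heads.bin', 202, binflag=True, output=True)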
"""
if fname in self.external_fnames:
if self.verbose:
print(
"BaseModel.add_external() warning: "
f"replacing existing filename {fname}"
)
idx = self.external_fnames.index(fname)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
if unit in self.external_units:
if self.verbose:
msg = f"BaseModel.add_external() warning: replacing existing unit {unit}"
print(msg)
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
self.external_output.append(output)
return
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
"""
plist = []
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
plist.append(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
plist.append(i)
else:
msg = " either fname or unit must be passed to remove_external()"
raise Exception(msg)
# remove external file
j = 0
for i in plist:
ipos = i - j
self.external_fnames.pop(ipos)
self.external_units.pop(ipos)
self.external_binflag.pop(ipos)
self.external_output.pop(ipos)
j += 1
return
def add_existing_package(
self, filename, ptype=None, copy_to_model_ws=True
):
"""
Add an existing package to a model instance.
Parameters
----------
filename : str
the name of the file to add as a package
ptype : optional
the model package type (e.g. "lpf", "wel", etc). If None,
then the file extension of the filename arg is used
copy_to_model_ws : bool
flag to copy the package file into the model_ws directory.
Returns
-------
None
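Examples
--------
Hypothetical usage, attaching a previously written well-package
file to the model:

>>> m.add_existing_package('oldmodel.wel', ptype='wel')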
"""
if ptype is None:
ptype = filename.split(".")[-1]
ptype = str(ptype).upper()
# for pak in self.packagelist:
# if ptype in pak.name:
# print("BaseModel.add_existing_package() warning: " +\
# "replacing existing package {0}".format(ptype))
class Obj:
pass
fake_package = Obj()
fake_package.write_file = lambda: None
fake_package.name = [ptype]
fake_package.extension = [filename.split(".")[-1]]
fake_package.unit_number = [self.next_ext_unit()]
if copy_to_model_ws:
base_filename = os.path.split(filename)[-1]
fake_package.file_name = [base_filename]
shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
else:
fake_package.file_name = [filename]
fake_package.allowDuplicates = True
self.add_package(fake_package)
def get_name_file_entries(self):
"""
Get a string representation of the name file.
Returns
-------
str
Newline-terminated name file entries, one line per package file.
"""
lines = []
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s = f"{p.name[i]:14s} {p.unit_number[i]:5d} {p.file_name[i]}"
lines.append(s)
return "\n".join(lines) + "\n"
def has_package(self, name):
"""
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
True if package name exists, otherwise False if not found.
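Examples
--------
Illustrative check, assuming a loaded model ``m``:

>>> m.has_package('dis')  # case-insensitive; True if DIS was added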
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for p in self.packagelist:
for pn in p.name:
if pn.upper() == name:
return True
return False
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
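Examples
--------
Illustrative lookup; returns None if the package is absent:

>>> lpf = m.get_package('LPF')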
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for pp in self.packagelist:
if pp.name[0].upper() == name:
return pp
return None
def set_version(self, version):
self.version = version.lower()
# check that this is a valid model version
if self.version not in list(self.version_types.keys()):
err = (
f"Error: Unsupported model version ({self.version}). "
"Valid model versions are:"
)
for v in list(self.version_types.keys()):
err += f" {v}"
raise Exception(err)
# set namefile heading
self.heading = (
f"# Name file for {self.version_types[self.version]}, "
f"generated by Flopy version {__version__}."
)
# set heading for each package
for p in self.get_package_list():
pak = self.get_package(p)
if hasattr(pak, "heading"):
pak._generate_heading()
return None
def change_model_ws(self, new_pth=None, reset_external=False):
"""
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
the present working directory).
Returns
-------
None
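Examples
--------
A hypothetical move of the model files to a new workspace,
rewriting relative external-file paths:

>>> m.change_model_ws('./run1', reset_external=True)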
"""
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
print(f"\ncreating model workspace...\n {new_pth}")
os.makedirs(new_pth)
except OSError:
raise OSError(f"{new_pth} not valid, workspace-folder")
# line = '\n{} not valid, workspace-folder '.format(new_pth) + \
# 'was changed to {}\n'.format(os.getcwd())
# print(line)
# new_pth = os.getcwd()
# --reset the model workspace
old_pth = self._model_ws
self._model_ws = new_pth
if self.verbose:
print(f"\nchanging model workspace...\n {new_pth}")
# reset the paths for each package
for pp in self.packagelist:
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
# create the external path (if needed)
if (
hasattr(self, "external_path")
and self.external_path is not None
and not os.path.exists(
os.path.join(self._model_ws, self.external_path)
)
):
pth = os.path.join(self._model_ws, self.external_path)
os.makedirs(pth)
if reset_external:
self._reset_external(pth, old_pth)
elif reset_external:
self._reset_external(self._model_ws, old_pth)
return None
def _reset_external(self, pth, old_pth):
new_ext_fnames = []
for ext_file, output in zip(
self.external_fnames, self.external_output
):
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
# this is a wicked mess
if output:
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
new_ext_file = ext_file
else:
# fpth = os.path.abspath(os.path.join(old_pth, ext_file))
# new_ext_file = os.path.relpath(fpth, os.path.abspath(pth))
fdir = os.path.dirname(ext_file)
if fdir == "":
fpth = os.path.abspath(os.path.join(old_pth, ext_file))
else:
fpth = ext_file
ao = os.path.abspath(os.path.dirname(fpth))
ep = os.path.abspath(pth)
relp = os.path.relpath(ao, ep)
new_ext_file = os.path.join(relp, os.path.basename(ext_file))
new_ext_fnames.append(new_ext_file)
self.external_fnames = new_ext_fnames
@property
def model_ws(self):
return copy.deepcopy(self._model_ws)
def _set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = str(value)
self.namefile = self.__name + "." + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + "." + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
def __setattr__(self, key, value):
if key == "free_format_input":
# if self.bas6 is not None:
# self.bas6.ifrefm = value
super().__setattr__(key, value)
elif key == "name":
self._set_name(value)
elif key == "model_ws":
self.change_model_ws(value)
elif key == "sr" and value.__class__.__name__ == "SpatialReference":
warnings.warn(
"SpatialReference has been deprecated.",
category=DeprecationWarning,
)
if self.dis is not None:
self.dis.sr = value
else:
raise Exception(
"cannot set SpatialReference - ModflowDis not found"
)
elif key == "tr":
assert isinstance(
value, discretization.reference.TemporalReference
)
if self.dis is not None:
self.dis.tr = value
else:
raise Exception(
"cannot set TemporalReference - ModflowDis not found"
)
elif key == "start_datetime":
if self.dis is not None:
self.dis.start_datetime = value
self.tr.start_datetime = value
else:
raise Exception(
"cannot set start_datetime - ModflowDis not found"
)
else:
super().__setattr__(key, value)
def run_model(
self,
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
):
"""
This method will run the model using subprocess.Popen.
Parameters
----------
silent : boolean
Suppress echo of run information to the screen (default is
False).
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
by the method. (default is False)
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
return run_model(
self.exe_name,
self.namefile,
model_ws=self.model_ws,
silent=silent,
pause=pause,
report=report,
normal_msg=normal_msg,
)
def load_results(self):
print("load_results not implemented")
return None
def write_input(self, SelPackList=False, check=False):
"""
Write the input.
Parameters
----------
SelPackList : False or list of packages
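check : boolean
Run the model-level check before writing input files.
(default is False)
Examples
--------
Hypothetical usage; the package names are placeholders:

>>> m.write_input()                            # write every package
>>> m.write_input(SelPackList=['WEL', 'RIV'])  # selected packages only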
"""
if check:
# run check prior to writing input
self.check(f=f"{self.name}.chk", verbose=self.verbose, level=1)
# reset the model to free_format if parameter substitution was
# performed on a model load
if self.parameter_load and not self.free_format_input:
if self.verbose:
print(
"\nResetting free_format_input to True to "
"preserve the precision of the parameter data."
)
self.free_format_input = True
if self.verbose:
print("\nWriting packages:")
if SelPackList == False:
for p in self.packagelist:
if self.verbose:
print(" Package: ", p.name[0])
# prevent individual package checks from running after
# model-level package check above
# otherwise checks are run twice
# or the model level check procedure would have to be split up
# or each package would need a check argument,
# or default for package level check would have to be False
try:
p.write_file(check=False)
except TypeError:
p.write_file()
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(" Package: ", p.name[0])
try:
p.write_file(check=False)
except TypeError:
p.write_file()
break
if self.verbose:
print(" ")
# write name file
self.write_name_file()
# os.chdir(org_dir)
return
def write_name_file(self):
"""
Every model needs its own write_name_file method
"""
raise Exception(
"IMPLEMENTATION ERROR: writenamefile must be overloaded"
)
def set_model_units(self):
"""
Every model needs its own set_model_units method
"""
raise Exception(
"IMPLEMENTATION ERROR: set_model_units must be overloaded"
)
@property
def name(self):
"""
Get model name
Returns
-------
name : str
name of model
"""
return copy.deepcopy(self.__name)
def add_pop_key_list(self, key):
"""
Add a external file unit number to a list that will be used to remove
model output (typically binary) files from ext_unit_dict.
Parameters
----------
key : int
file unit number
Returns
-------
Examples
--------
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
# MASKED: check function (lines 1485-1539)
def plot(self, SelPackList=None, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
model input data
Parameters
----------
SelPackList : bool or list
List of packages to plot. If SelPackList=None all packages
are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
MODFLOW zero-based layer number to return. If None, then all
layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return.
(default is zero)
key : str
MfList dictionary key. (default is None)
Returns
----------
axes : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis are returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_model_helper(
self, SelPackList=SelPackList, **kwargs
)
return axes
def to_shapefile(self, filename, package_names=None, **kwargs):
"""
Wrapper function for writing a shapefile for the model grid. If
package_names is not None, then search through the requested packages
looking for arrays that can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> m.to_shapefile('model.shp', package_names=['dis', 'lpf'])
"""
warnings.warn("to_shapefile() is deprecated. use .export()")
self.export(filename, package_names=package_names)
return
def run_model(
exe_name,
namefile,
model_ws="./",
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
use_async=False,
cargs=None,
):
"""
This function will run the model using subprocess.Popen. It
communicates with the model's stdout asynchronously and reports
progress to the screen with timestamps
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
Namefile of the model to run. This must be the filename only,
without the path. Namefile can be None for programs that do not
require a control file (name file) as a command line argument.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
silent : boolean
Suppress echo of run information to the screen (default is
False).
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
by the method. (default is False)
normal_msg : str or list
Normal termination message used to determine if the
run terminated normally. More than one message can be provided
using a list. (default is 'normal termination')
use_async : boolean
Asynchronously read model stdout and report with timestamps;
useful for models that take a long time to run, but not for
models that finish quickly.
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
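Examples
--------
A minimal sketch; the executable and namefile are hypothetical:

>>> success, buff = run_model('mf2005', 'model.nam', model_ws='./model', report=True)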
"""
success = False
buff = []
# convert normal_msg to a list of lower case str for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg]
for idx, s in enumerate(normal_msg):
normal_msg[idx] = s.lower()
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
import platform
if platform.system() in "Windows":
if not exe_name.lower().endswith(".exe"):
exe = which(exe_name + ".exe")
elif exe_name.lower().endswith(".exe"):
exe = which(exe_name[:-4])
if exe is None:
raise Exception(
f"The program {exe_name} does not exist or is not executable."
)
else:
if not silent:
print(
f"FloPy is using the following executable to run the model: {exe}"
)
if namefile is not None:
if not os.path.isfile(os.path.join(model_ws, namefile)):
raise Exception(
f"The namefile for this model does not exists: {namefile}"
)
# simple little function for the thread to target
def q_output(output, q):
for line in iter(output.readline, b""):
q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe_name]
if namefile is not None:
argv.append(namefile)
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
if not use_async:
while True:
line = proc.stdout.readline().decode("utf-8")
if line == "" and proc.poll() is not None:
break
if line:
for msg in normal_msg:
if msg in line.lower():
success = True
break
line = line.rstrip("\r\n")
if not silent:
print(line)
if report:
buff.append(line)
else:
break
return success, buff
# some tricks for the async stdout reading
q = Queue.Queue()
thread = threading.Thread(target=q_output, args=(proc.stdout, q))
thread.daemon = True
thread.start()
failed_words = ["fail", "error"]
last = datetime.now()
lastsec = 0.0
while True:
try:
line = q.get_nowait()
except Queue.Empty:
pass
else:
if line == "":
break
line = line.decode().lower().strip()
if line != "":
now = datetime.now()
dt = now - last
tsecs = dt.total_seconds() - lastsec
line = f"(elapsed:{tsecs})-->{line}"
lastsec = tsecs + lastsec
buff.append(line)
if not silent:
print(line)
for fword in failed_words:
if fword in line:
success = False
break
if proc.poll() is not None:
break
proc.wait()
thread.join(timeout=1)
buff.extend(proc.stdout.readlines())
proc.stdout.close()
for line in buff:
for msg in normal_msg:
if msg in line.lower():
print("success")
success = True
break
if pause:
input("Press Enter to continue...")
return success, buff
|
def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
chk : check object
Check instance containing the summary of results.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
chk = utils.check(self, f=f, verbose=verbose, level=level)
# check for unit number conflicts
package_units = {}
duplicate_units = {}
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] != 0:
if p.unit_number[i] in package_units.values():
duplicate_units[p.name[i]] = p.unit_number[i]
otherpackage = [
k
for k, v in package_units.items()
if v == p.unit_number[i]
][0]
duplicate_units[otherpackage] = p.unit_number[i]
# record each unit so later packages can be checked against it
package_units[p.name[i]] = p.unit_number[i]
if len(duplicate_units) > 0:
for k, v in duplicate_units.items():
chk._add_to_summary(
"Error", package=k, value=v, desc="unit number conflict"
)
else:
chk.passed.append("Unit number conflicts")
return self._check(chk, level)
| 1,485
| 1,539
|
"""
mbase module
This module contains the base model class from which
all of the other models inherit from.
"""
import abc
import os
import shutil
import threading
import warnings
import queue as Queue
from datetime import datetime
from shutil import which
from subprocess import Popen, PIPE, STDOUT
import copy
import numpy as np
from flopy import utils, discretization
from .version import __version__
from .discretization.grid import Grid
## Global variables
# Multiplier for individual array elements in integer and real arrays read by
# MODFLOW's U2DREL, U1DREL and U2DINT.
iconst = 1
# Printout flag. If >= 0 then array values read are printed in listing file.
iprn = -1
# external exceptions for users
class PackageLoadException(Exception):
"""
FloPy package load exception.
"""
def __init__(self, error, location=""):
"""Initialize exception."""
self.message = error
super().__init__(f"{error} ({location})")
class FileDataEntry:
def __init__(self, fname, unit, binflag=False, output=False, package=None):
self.fname = fname
self.unit = unit
self.binflag = binflag
self.output = output
self.package = package
class FileData:
def __init__(self):
self.file_data = []
return
def add_file(self, fname, unit, binflag=False, output=False, package=None):
ipop = []
for idx, file_data in enumerate(self.file_data):
if file_data.fname == fname or file_data.unit == unit:
ipop.append(idx)
self.file_data.append(
FileDataEntry(
fname, unit, binflag=binflag, output=output, package=package
)
)
return
class ModelInterface:
def __init__(self):
self._mg_resync = True
self._modelgrid = None
def update_modelgrid(self):
if self._modelgrid is not None:
self._modelgrid = Grid(
proj4=self._modelgrid.proj4,
xoff=self._modelgrid.xoffset,
yoff=self._modelgrid.yoffset,
angrot=self._modelgrid.angrot,
)
self._mg_resync = True
@property
@abc.abstractmethod
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
@abc.abstractmethod
def packagelist(self):
raise NotImplementedError(
"must define packagelist in child class to use this base class"
)
@property
@abc.abstractmethod
def namefile(self):
raise NotImplementedError(
"must define namefile in child class to use this base class"
)
@property
@abc.abstractmethod
def model_ws(self):
raise NotImplementedError(
"must define model_ws in child class to use this base class"
)
@property
@abc.abstractmethod
def exename(self):
raise NotImplementedError(
"must define exename in child class to use this base class"
)
@property
@abc.abstractmethod
def version(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@property
@abc.abstractmethod
def solver_tols(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@abc.abstractmethod
def export(self, f, **kwargs):
raise NotImplementedError(
"must define export in child class to use this base class"
)
@property
@abc.abstractmethod
def laytyp(self):
raise NotImplementedError(
"must define laytyp in child class to use this base class"
)
@property
@abc.abstractmethod
def hdry(self):
raise NotImplementedError(
"must define hdry in child class to use this base class"
)
@property
@abc.abstractmethod
def hnoflo(self):
raise NotImplementedError(
"must define hnoflo in child class to use this base class"
)
@property
@abc.abstractmethod
def laycbd(self):
raise NotImplementedError(
"must define laycbd in child class to use this base class"
)
@property
@abc.abstractmethod
def verbose(self):
raise NotImplementedError(
"must define verbose in child class to use this base class"
)
@abc.abstractmethod
def check(self, f=None, verbose=True, level=1):
raise NotImplementedError(
"must define check in child class to use this base class"
)
def get_package_list(self, ftype=None):
"""
Get a list of all the package names.
Parameters
----------
ftype : str
Type of package, 'RIV', 'LPF', etc.
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
"""
val = []
for pp in self.packagelist:
if ftype is None:
val.append(pp.name[0].upper())
elif pp.package_type.lower() == ftype:
val.append(pp.name[0].upper())
return val
def _check(self, chk, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
summarize : bool
Boolean flag used to determine if summary of results is written
to the screen
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
results = {}
for p in self.packagelist:
if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
results[p.name[0]] = p.check(
f=None,
verbose=False,
level=level - 1,
checktype=chk.__class__,
)
# model level checks
# solver check
if self.version in chk.solver_packages.keys():
solvers = set(chk.solver_packages[self.version]).intersection(
set(self.get_package_list())
)
if not solvers:
chk._add_to_summary(
"Error", desc="\r No solver package", package="model"
)
elif len(list(solvers)) > 1:
for s in solvers:
chk._add_to_summary(
"Error",
desc="\r Multiple solver packages",
package=s,
)
else:
chk.passed.append("Compatible solver package")
# add package check results to model level check summary
for r in results.values():
if (
r is not None and r.summary_array is not None
): # currently SFR doesn't have one
chk.summary_array = np.append(
chk.summary_array, r.summary_array
).view(np.recarray)
chk.passed += [
f"{r.package.name[0]} package: {psd}" for psd in r.passed
]
chk.summarize()
return chk
class BaseModel(ModelInterface):
"""
MODFLOW-based models base class.
Parameters
----------
modelname : str, default "modflowtest"
Name of the model, which is also used for model file names.
namefile_ext : str, default "nam"
Name file extension, without "."
exe_name : str, default "mf2k.exe"
Name of the modflow executable.
model_ws : str, optional
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
structured : bool, default True
Specify if model grid is structured (default) or unstructured.
verbose : bool, default False
Print additional information to the screen.
**kwargs : dict, optional
Used to define: ``xll``/``yll`` for the x- and y-coordinates of
the lower-left corner of the grid, ``xul``/``yul`` for the
x- and y-coordinates of the upper-left corner of the grid
(deprecated), ``rotation`` for the grid rotation (default 0.0),
``proj4_str`` for a PROJ string, and ``start_datetime`` for
model start date (default "1-1-1970").
"""
def __init__(
self,
modelname="modflowtest",
namefile_ext="nam",
exe_name="mf2k.exe",
model_ws=None,
structured=True,
verbose=False,
**kwargs,
):
"""Initialize BaseModel."""
super().__init__()
self.__name = modelname
self.namefile_ext = namefile_ext or ""
self._namefile = self.__name + "." + self.namefile_ext
self._packagelist = []
self.heading = ""
self.exe_name = exe_name
self._verbose = verbose
self.external_path = None
self.external_extension = "ref"
if model_ws is None:
model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
except:
print(
f"\n{model_ws} not valid, "
f"workspace-folder was changed to {os.getcwd()}\n"
)
model_ws = os.getcwd()
self._model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ""
# check for reference info in kwargs
# we are just carrying these until a dis package is added
xll = kwargs.pop("xll", None)
yll = kwargs.pop("yll", None)
self._xul = kwargs.pop("xul", None)
self._yul = kwargs.pop("yul", None)
self._rotation = kwargs.pop("rotation", 0.0)
self._proj4_str = kwargs.pop("proj4_str", None)
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
# build model discretization objects
self._modelgrid = Grid(
proj4=self._proj4_str,
xoff=xll,
yoff=yll,
angrot=self._rotation,
)
self._modeltime = None
# Model file information
self.__onunit__ = 10
# external option stuff
self.array_free_format = True
self.free_format_input = True
self.parameter_load = False
self.array_format = None
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external_output = []
self.package_units = []
self._next_ext_unit = None
# output files
self.output_fnames = []
self.output_units = []
self.output_binflag = []
self.output_packages = []
return
@property
def modeltime(self):
raise NotImplementedError(
"must define modeltime in child class to use this base class"
)
@property
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
def packagelist(self):
return self._packagelist
@packagelist.setter
def packagelist(self, packagelist):
self._packagelist = packagelist
@property
def namefile(self):
return self._namefile
@namefile.setter
def namefile(self, namefile):
self._namefile = namefile
@property
def model_ws(self):
return self._model_ws
@model_ws.setter
def model_ws(self, model_ws):
self._model_ws = model_ws
@property
def exename(self):
return self._exename
@exename.setter
def exename(self, exename):
self._exename = exename
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, verbose):
self._verbose = verbose
@property
def laytyp(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").laytyp.array
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").laycon.array
if self.get_package("UPW") is not None:
return self.get_package("UPW").laytyp.array
return None
@property
def hdry(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").hdry
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").hdry
if self.get_package("UPW") is not None:
return self.get_package("UPW").hdry
return None
@property
def hnoflo(self):
try:
bas6 = self.get_package("BAS6")
return bas6.hnoflo
except AttributeError:
return None
@property
def laycbd(self):
try:
dis = self.get_package("DIS")
return dis.laycbd.array
except AttributeError:
return None
# we don't need these - no need for controlled access to array_free_format
# def set_free_format(self, value=True):
# """
# Set the free format flag for the model instance
#
# Parameters
# ----------
# value : bool
# Boolean value to set free format flag for model. (default is True)
#
# Returns
# -------
#
# """
# if not isinstance(value, bool):
# print('Error: set_free_format passed value must be a boolean')
# return False
# self.array_free_format = value
#
# def get_free_format(self):
# """
# Return the free format flag for the model
#
# Returns
# -------
# out : bool
# Free format flag for the model
#
# """
# return self.array_free_format
def next_unit(self, i=None):
if i is not None:
self.__onunit__ = i - 1
else:
self.__onunit__ += 1
return self.__onunit__
def next_ext_unit(self):
"""
Function to encapsulate next_ext_unit attribute
"""
next_unit = self._next_ext_unit + 1
self._next_ext_unit += 1
return next_unit
def export(self, f, **kwargs):
"""
Method to export a model to netcdf or shapefile based on the
extension of the file name (.shp for shapefile, .nc for netcdf)
Parameters
----------
f : str
filename
kwargs : keyword arguments
modelgrid : flopy.discretization.Grid instance
user supplied modelgrid which can be used for exporting
in lieu of the modelgrid associated with the model object
Returns
-------
None or Netcdf object
"""
from .export import utils
return utils.model_export(f, self, **kwargs)
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
"""
for idx, u in enumerate(p.unit_number):
if u != 0:
if u in self.package_units or u in self.external_units:
try:
pn = p.name[idx]
except:
pn = p.name
if self.verbose:
print(
f"\nWARNING:\n unit {u} of package {pn} already in use."
)
self.package_units.append(u)
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif isinstance(p, type(pp)):
if self.verbose:
print(
"\nWARNING:\n Two packages of the same type, "
f"Replacing existing '{p.name[0]}' package."
)
self.packagelist[i] = p
return
if self.verbose:
print("adding Package: ", p.name[0])
self.packagelist.append(p)
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
"""
for i, pp in enumerate(self.packagelist):
if pname.upper() in pp.name:
if self.verbose:
print("removing Package: ", pp.name)
# Remove the package object from the model's packagelist
p = self.packagelist.pop(i)
# Remove the package unit number from the list of package
# units stored with the model
for iu in p.unit_number:
if iu in self.package_units:
self.package_units.remove(iu)
return
raise StopIteration(
"Package name " + pname + " not found in Package list"
)
def __getattr__(self, item):
"""
__getattr__ - syntactic sugar
Parameters
----------
item : str
3 character package name (case insensitive) or "sr" to access
the SpatialReference instance of the ModflowDis object
Returns
-------
sr : SpatialReference instance
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
Note
----
if self.dis is not None, then the spatial reference instance is updated
using self.dis.delr, self.dis.delc, and self.dis.lenuni before being
returned
"""
if item == "output_packages" or not hasattr(self, "output_packages"):
raise AttributeError(item)
if item == "tr":
if self.dis is not None:
return self.dis.tr
else:
return None
if item == "nper":
if self.dis is not None:
return self.dis.nper
else:
return 0
if item == "start_datetime":
if self.dis is not None:
return self.dis.start_datetime
else:
return None
# return self.get_package(item)
# to avoid infinite recursion
if item == "_packagelist" or item == "packagelist":
raise AttributeError(item)
pckg = self.get_package(item)
if pckg is not None or item in self.mfnam_packages:
return pckg
if item == "modelgrid":
return
raise AttributeError(item)
def get_ext_dict_attr(
self, ext_unit_dict=None, unit=None, filetype=None, pop_key=True
):
iu = None
fname = None
if ext_unit_dict is not None:
for key, value in ext_unit_dict.items():
if key == unit:
iu = key
fname = os.path.basename(value.filename)
break
elif value.filetype == filetype:
iu = key
fname = os.path.basename(value.filename)
if pop_key:
self.add_pop_key_list(iu)
break
return iu, fname
def _output_msg(self, i, add=True):
if add:
txt1 = "Adding"
txt2 = "to"
else:
txt1 = "Removing"
txt2 = "from"
print(
f"{txt1} {self.output_fnames[i]} (unit={self.output_units[i]}) "
f"{txt2} the output list."
)
def add_output_file(
self, unit, fname=None, extension="cbc", binflag=True, package=None
):
"""
Add an ascii or binary output file for a package
Parameters
----------
unit : int
unit number of external array
fname : str
filename of external array. (default is None)
extension : str
extension to use for the cell-by-cell file. Only used if fname
is None. (default is cbc)
binflag : bool
boolean flag indicating if the output file is a binary file.
Default is True
package : str
string that defines the package the output file is attached to.
Default is None
"""
add_cbc = False
if unit > 0:
add_cbc = True
# determine if the file is in external_units
if abs(unit) in self.external_units:
idx = self.external_units.index(abs(unit))
if fname is None:
fname = os.path.basename(self.external_fnames[idx])
binflag = self.external_binflag[idx]
self.remove_external(unit=abs(unit))
# determine if the unit exists in the output data
if abs(unit) in self.output_units:
add_cbc = False
idx = self.output_units.index(abs(unit))
# determine if binflag has changed
if binflag is not self.output_binflag[idx]:
add_cbc = True
if add_cbc:
self.remove_output(unit=abs(unit))
else:
if package is not None:
self.output_packages[idx].append(package)
if add_cbc:
if fname is None:
fname = f"{self.name}.{extension}"
# check if this file name exists for a different unit number
if fname in self.output_fnames:
idx = self.output_fnames.index(fname)
iut = self.output_units[idx]
if iut != unit:
# include unit number in fname if package has
# not been passed
if package is None:
fname = f"{self.name}.{unit}.{extension}"
# include package name in fname
else:
fname = f"{self.name}.{package}.{extension}"
else:
fname = os.path.basename(fname)
self.add_output(fname, unit, binflag=binflag, package=package)
return
def add_output(self, fname, unit, binflag=False, package=None):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.output_fnames:
if self.verbose:
print(
"BaseModel.add_output() warning: "
f"replacing existing filename {fname}"
)
idx = self.output_fnames.index(fname)
if self.verbose:
self._output_msg(idx, add=False)
self.output_fnames.pop(idx)
self.output_units.pop(idx)
self.output_binflag.pop(idx)
self.output_packages.pop(idx)
self.output_fnames.append(fname)
self.output_units.append(unit)
self.output_binflag.append(binflag)
if package is not None:
self.output_packages.append([package])
else:
self.output_packages.append([])
if self.verbose:
self._output_msg(-1, add=True)
return
def remove_output(self, fname=None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
else:
msg = " either fname or unit must be passed to remove_output()"
raise Exception(msg)
return
def get_output(self, fname=None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
return self.output_units[i]
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
return self.output_fnames[i]
return None
else:
msg = " either fname or unit must be passed to get_output()"
raise Exception(msg)
return
def set_output_attribute(self, fname=None, unit=None, attr=None):
"""
Set a variable in an output file from the model by specifying either
the file name or the unit number and a dictionary with attributes
to change.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
msg = (
" either fname or unit must be passed "
"to set_output_attribute()"
)
raise Exception(msg)
if attr is not None:
if idx is not None:
for key, value in attr.items:
if key == "binflag":
self.output_binflag[idx] = value
elif key == "fname":
self.output_fnames[idx] = value
elif key == "unit":
self.output_units[idx] = value
return
def get_output_attribute(self, fname=None, unit=None, attr=None):
"""
Get a attribute for an output file from the model by specifying either
the file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
" either fname or unit must be passed "
"to set_output_attribute()"
)
v = None
if attr is not None:
if idx is not None:
if attr == "binflag":
v = self.output_binflag[idx]
elif attr == "fname":
v = self.output_fnames[idx]
elif attr == "unit":
v = self.output_units[idx]
return v
def add_external(self, fname, unit, binflag=False, output=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.external_fnames:
if self.verbose:
print(
"BaseModel.add_external() warning: "
f"replacing existing filename {fname}"
)
idx = self.external_fnames.index(fname)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
if unit in self.external_units:
if self.verbose:
msg = f"BaseModel.add_external() warning: replacing existing unit {unit}"
print(msg)
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
self.external_output.append(output)
return
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
"""
plist = []
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
plist.append(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
plist.append(i)
else:
msg = " either fname or unit must be passed to remove_external()"
raise Exception(msg)
# remove external file
j = 0
for i in plist:
ipos = i - j
self.external_fnames.pop(ipos)
self.external_units.pop(ipos)
self.external_binflag.pop(ipos)
self.external_output.pop(ipos)
j += 1
return
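    # A minimal standalone illustration of the index bookkeeping above:
    # each pop() shifts later elements down by one, so the running offset
    # ``j`` maps indices recorded against the original list onto the
    # shrinking list.
    #
    # >>> lst = ["a", "b", "c", "d"]
    # >>> for j, i in enumerate([1, 3]):
    # ...     _ = lst.pop(i - j)   # removes original items 1 and 3
    # >>> lst
    # ['a', 'c']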
def add_existing_package(
self, filename, ptype=None, copy_to_model_ws=True
):
"""
Add an existing package to a model instance.
Parameters
----------
filename : str
the name of the file to add as a package
ptype : optional
the model package type (e.g. "lpf", "wel", etc). If None,
then the file extension of the filename arg is used
copy_to_model_ws : bool
flag to copy the package file into the model_ws directory.
Returns
-------
None
"""
if ptype is None:
ptype = filename.split(".")[-1]
ptype = str(ptype).upper()
# for pak in self.packagelist:
# if ptype in pak.name:
# print("BaseModel.add_existing_package() warning: " +\
# "replacing existing package {0}".format(ptype))
class Obj:
pass
fake_package = Obj()
fake_package.write_file = lambda: None
fake_package.name = [ptype]
fake_package.extension = [filename.split(".")[-1]]
fake_package.unit_number = [self.next_ext_unit()]
if copy_to_model_ws:
base_filename = os.path.split(filename)[-1]
fake_package.file_name = [base_filename]
shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
else:
fake_package.file_name = [filename]
fake_package.allowDuplicates = True
self.add_package(fake_package)
def get_name_file_entries(self):
"""
Get a string representation of the name file.
        Returns
        -------
        str
            string representation of the name file
        """
lines = []
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s = f"{p.name[i]:14s} {p.unit_number[i]:5d} {p.file_name[i]}"
lines.append(s)
return "\n".join(lines) + "\n"
def has_package(self, name):
"""
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
True if package name exists, otherwise False if not found.
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for p in self.packagelist:
for pn in p.name:
if pn.upper() == name:
return True
return False
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for pp in self.packagelist:
if pp.name[0].upper() == name:
return pp
return None
def set_version(self, version):
self.version = version.lower()
# check that this is a valid model version
if self.version not in list(self.version_types.keys()):
err = (
f"Error: Unsupported model version ({self.version}). "
"Valid model versions are:"
)
for v in list(self.version_types.keys()):
err += f" {v}"
raise Exception(err)
# set namefile heading
self.heading = (
f"# Name file for {self.version_types[self.version]}, "
f"generated by Flopy version {__version__}."
)
# set heading for each package
for p in self.get_package_list():
pak = self.get_package(p)
if hasattr(pak, "heading"):
pak._generate_heading()
return None
def change_model_ws(self, new_pth=None, reset_external=False):
"""
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
the present working directory).
Returns
-------
        None
"""
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
print(f"\ncreating model workspace...\n {new_pth}")
os.makedirs(new_pth)
except:
raise OSError(f"{new_pth} not valid, workspace-folder")
# line = '\n{} not valid, workspace-folder '.format(new_pth) + \
# 'was changed to {}\n'.format(os.getcwd())
# print(line)
# new_pth = os.getcwd()
# --reset the model workspace
old_pth = self._model_ws
self._model_ws = new_pth
if self.verbose:
print(f"\nchanging model workspace...\n {new_pth}")
# reset the paths for each package
for pp in self.packagelist:
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
# create the external path (if needed)
if (
hasattr(self, "external_path")
and self.external_path is not None
and not os.path.exists(
os.path.join(self._model_ws, self.external_path)
)
):
pth = os.path.join(self._model_ws, self.external_path)
os.makedirs(pth)
if reset_external:
self._reset_external(pth, old_pth)
elif reset_external:
self._reset_external(self._model_ws, old_pth)
return None
def _reset_external(self, pth, old_pth):
new_ext_fnames = []
for ext_file, output in zip(
self.external_fnames, self.external_output
):
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
# this is a wicked mess
if output:
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
new_ext_file = ext_file
else:
# fpth = os.path.abspath(os.path.join(old_pth, ext_file))
# new_ext_file = os.path.relpath(fpth, os.path.abspath(pth))
fdir = os.path.dirname(ext_file)
if fdir == "":
fpth = os.path.abspath(os.path.join(old_pth, ext_file))
else:
fpth = ext_file
ao = os.path.abspath(os.path.dirname(fpth))
ep = os.path.abspath(pth)
relp = os.path.relpath(ao, ep)
new_ext_file = os.path.join(relp, os.path.basename(ext_file))
new_ext_fnames.append(new_ext_file)
self.external_fnames = new_ext_fnames
@property
def model_ws(self):
return copy.deepcopy(self._model_ws)
def _set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = str(value)
self.namefile = self.__name + "." + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + "." + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
def __setattr__(self, key, value):
if key == "free_format_input":
# if self.bas6 is not None:
# self.bas6.ifrefm = value
super().__setattr__(key, value)
elif key == "name":
self._set_name(value)
elif key == "model_ws":
self.change_model_ws(value)
elif key == "sr" and value.__class__.__name__ == "SpatialReference":
warnings.warn(
"SpatialReference has been deprecated.",
category=DeprecationWarning,
)
if self.dis is not None:
self.dis.sr = value
else:
raise Exception(
"cannot set SpatialReference - ModflowDis not found"
)
elif key == "tr":
assert isinstance(
value, discretization.reference.TemporalReference
)
if self.dis is not None:
self.dis.tr = value
else:
raise Exception(
"cannot set TemporalReference - ModflowDis not found"
)
elif key == "start_datetime":
if self.dis is not None:
self.dis.start_datetime = value
self.tr.start_datetime = value
else:
raise Exception(
"cannot set start_datetime - ModflowDis not found"
)
else:
super().__setattr__(key, value)
def run_model(
self,
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
):
"""
This method will run the model using subprocess.Popen.
Parameters
----------
        silent : boolean
            If True, suppress echo of run information to the screen.
            (default is False)
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
            Save stdout lines to a list (buff) which is returned
            by the method. (default is False)
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
return run_model(
self.exe_name,
self.namefile,
model_ws=self.model_ws,
silent=silent,
pause=pause,
report=report,
normal_msg=normal_msg,
)
def load_results(self):
print("load_results not implemented")
return None
def write_input(self, SelPackList=False, check=False):
"""
Write the input.
Parameters
----------
        SelPackList : False or list of str
            List of package names to write. If False (default), all
            packages are written.
        check : boolean
            flag to run the model-level check prior to writing input.
            (default is False)
        """
if check:
# run check prior to writing input
self.check(f=f"{self.name}.chk", verbose=self.verbose, level=1)
# reset the model to free_format if parameter substitution was
# performed on a model load
if self.parameter_load and not self.free_format_input:
if self.verbose:
print(
"\nResetting free_format_input to True to "
"preserve the precision of the parameter data."
)
self.free_format_input = True
if self.verbose:
print("\nWriting packages:")
if SelPackList == False:
for p in self.packagelist:
if self.verbose:
print(" Package: ", p.name[0])
# prevent individual package checks from running after
# model-level package check above
# otherwise checks are run twice
# or the model level check procedure would have to be split up
# or each package would need a check argument,
# or default for package level check would have to be False
try:
p.write_file(check=False)
except TypeError:
p.write_file()
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(" Package: ", p.name[0])
try:
p.write_file(check=False)
except TypeError:
p.write_file()
break
if self.verbose:
print(" ")
# write name file
self.write_name_file()
# os.chdir(org_dir)
return
def write_name_file(self):
"""
        Every model needs its own write_name_file method
        """
        raise Exception(
            "IMPLEMENTATION ERROR: write_name_file must be overloaded"
)
def set_model_units(self):
"""
Every model needs its own set_model_units method
"""
raise Exception(
"IMPLEMENTATION ERROR: set_model_units must be overloaded"
)
@property
def name(self):
"""
Get model name
Returns
-------
name : str
name of model
"""
return copy.deepcopy(self.__name)
def add_pop_key_list(self, key):
"""
        Add an external file unit number to a list that will be used to remove
model output (typically binary) files from ext_unit_dict.
Parameters
----------
key : int
file unit number
Returns
-------
Examples
--------
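        Hypothetical usage (the unit number is illustrative only):

        >>> m.add_pop_key_list(53)
        >>> 53 in m.pop_key_list
        True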
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
chk = utils.check(self, f=f, verbose=verbose, level=level)
# check for unit number conflicts
package_units = {}
duplicate_units = {}
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] != 0:
if p.unit_number[i] in package_units.values():
duplicate_units[p.name[i]] = p.unit_number[i]
otherpackage = [
k
for k, v in package_units.items()
if v == p.unit_number[i]
][0]
                        duplicate_units[otherpackage] = p.unit_number[i]
                    package_units[p.name[i]] = p.unit_number[i]
if len(duplicate_units) > 0:
for k, v in duplicate_units.items():
chk._add_to_summary(
"Error", package=k, value=v, desc="unit number conflict"
)
else:
chk.passed.append("Unit number conflicts")
return self._check(chk, level)
def plot(self, SelPackList=None, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
model input data
Parameters
----------
        SelPackList : list of str or None
            List of packages to plot. If SelPackList=None all packages
            are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
                files if filename_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
                MODFLOW zero-based layer number to return. If None, then
                all layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return.
(default is zero)
key : str
MfList dictionary key. (default is None)
Returns
        -------
        axes : list
            Empty list is returned if filename_base is not None. Otherwise
            a list of matplotlib axes objects is returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_model_helper(
self, SelPackList=SelPackList, **kwargs
)
return axes
def to_shapefile(self, filename, package_names=None, **kwargs):
"""
Wrapper function for writing a shapefile for the model grid. If
package_names is not None, then search through the requested packages
looking for arrays that can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
        >>> m.to_shapefile('model.shp', package_names=['dis', 'lpf'])
"""
warnings.warn("to_shapefile() is deprecated. use .export()")
self.export(filename, package_names=package_names)
return
def run_model(
exe_name,
namefile,
model_ws="./",
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
use_async=False,
cargs=None,
):
"""
This function will run the model using subprocess.Popen. It
communicates with the model's stdout asynchronously and reports
progress to the screen with timestamps
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
Namefile of model to run. The namefile must be the
filename of the namefile without the path. Namefile can be None
to allow programs that do not require a control file (name file)
to be passed as a command line argument.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
    silent : boolean
        If True, suppress echo of run information to the screen.
        (default is False)
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
        Save stdout lines to a list (buff) which is returned
        by the method. (default is False)
normal_msg : str or list
Normal termination message used to determine if the
run terminated normally. More than one message can be provided using
a list. (Default is 'normal termination')
    use_async : boolean
        If True, asynchronously read model stdout and report with
        timestamps. Useful for models that take a long time to run,
        but not for models that run quickly.
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
success = False
buff = []
# convert normal_msg to a list of lower case str for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg]
for idx, s in enumerate(normal_msg):
normal_msg[idx] = s.lower()
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
import platform
if platform.system() in "Windows":
if not exe_name.lower().endswith(".exe"):
exe = which(exe_name + ".exe")
elif exe_name.lower().endswith(".exe"):
exe = which(exe_name[:-4])
if exe is None:
raise Exception(
f"The program {exe_name} does not exist or is not executable."
)
else:
if not silent:
print(
f"FloPy is using the following executable to run the model: {exe}"
)
if namefile is not None:
if not os.path.isfile(os.path.join(model_ws, namefile)):
raise Exception(
f"The namefile for this model does not exists: {namefile}"
)
# simple little function for the thread to target
def q_output(output, q):
for line in iter(output.readline, b""):
q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe_name]
if namefile is not None:
argv.append(namefile)
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
if not use_async:
while True:
line = proc.stdout.readline().decode("utf-8")
if line == "" and proc.poll() is not None:
break
if line:
for msg in normal_msg:
if msg in line.lower():
success = True
break
line = line.rstrip("\r\n")
if not silent:
print(line)
if report:
buff.append(line)
else:
break
return success, buff
# some tricks for the async stdout reading
q = Queue.Queue()
thread = threading.Thread(target=q_output, args=(proc.stdout, q))
thread.daemon = True
thread.start()
failed_words = ["fail", "error"]
last = datetime.now()
lastsec = 0.0
while True:
try:
line = q.get_nowait()
except Queue.Empty:
pass
else:
if line == "":
break
line = line.decode().lower().strip()
if line != "":
now = datetime.now()
dt = now - last
tsecs = dt.total_seconds() - lastsec
line = f"(elapsed:{tsecs})-->{line}"
lastsec = tsecs + lastsec
buff.append(line)
if not silent:
print(line)
for fword in failed_words:
if fword in line:
success = False
break
if proc.poll() is not None:
break
proc.wait()
thread.join(timeout=1)
buff.extend(proc.stdout.readlines())
proc.stdout.close()
for line in buff:
for msg in normal_msg:
if msg in line.lower():
print("success")
success = True
break
if pause:
input("Press Enter to continue...")
return success, buff
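# Usage sketch for run_model() (executable name, namefile, and workspace
# are hypothetical):
#
# >>> success, buff = run_model("mf2005", "model.nam",
# ...                           model_ws="./model", report=True)
# >>> if not success:
# ...     print("\n".join(buff))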
|
plot
|
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
model input data
Parameters
----------
SelPackList : list of str or None
    List of packages to plot. If SelPackList=None all packages
    are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
        files if filename_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
        MODFLOW zero-based layer number to return. If None, then
        all layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return.
(default is zero)
key : str
MfList dictionary key. (default is None)
Returns
-------
axes : list
    Empty list is returned if filename_base is not None. Otherwise
    a list of matplotlib axes objects is returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
|
"""
mbase module
This module contains the base model class from which
all of the other models inherit.
"""
import abc
import os
import shutil
import threading
import warnings
import queue as Queue
from datetime import datetime
from shutil import which
from subprocess import Popen, PIPE, STDOUT
import copy
import numpy as np
from flopy import utils, discretization
from .version import __version__
from .discretization.grid import Grid
## Global variables
# Multiplier for individual array elements in integer and real arrays read by
# MODFLOW's U2DREL, U1DREL and U2DINT.
iconst = 1
# Printout flag. If >= 0 then array values read are printed in listing file.
iprn = -1
# external exceptions for users
class PackageLoadException(Exception):
"""
FloPy package load exception.
"""
def __init__(self, error, location=""):
"""Initialize exception."""
self.message = error
super().__init__(f"{error} ({location})")
class FileDataEntry:
def __init__(self, fname, unit, binflag=False, output=False, package=None):
self.fname = fname
self.unit = unit
self.binflag = binflag
self.output = output
self.package = package
class FileData:
def __init__(self):
self.file_data = []
return
def add_file(self, fname, unit, binflag=False, output=False, package=None):
ipop = []
for idx, file_data in enumerate(self.file_data):
if file_data.fname == fname or file_data.unit == unit:
                ipop.append(idx)
        # drop existing entries that clash on file name or unit number
        for idx in ipop[::-1]:
            self.file_data.pop(idx)
        self.file_data.append(
FileDataEntry(
fname, unit, binflag=binflag, output=output, package=package
)
)
return
class ModelInterface:
def __init__(self):
self._mg_resync = True
self._modelgrid = None
def update_modelgrid(self):
if self._modelgrid is not None:
self._modelgrid = Grid(
proj4=self._modelgrid.proj4,
xoff=self._modelgrid.xoffset,
yoff=self._modelgrid.yoffset,
angrot=self._modelgrid.angrot,
)
self._mg_resync = True
@property
@abc.abstractmethod
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
@abc.abstractmethod
def packagelist(self):
raise NotImplementedError(
"must define packagelist in child class to use this base class"
)
@property
@abc.abstractmethod
def namefile(self):
raise NotImplementedError(
"must define namefile in child class to use this base class"
)
@property
@abc.abstractmethod
def model_ws(self):
raise NotImplementedError(
"must define model_ws in child class to use this base class"
)
@property
@abc.abstractmethod
def exename(self):
raise NotImplementedError(
"must define exename in child class to use this base class"
)
@property
@abc.abstractmethod
def version(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@property
@abc.abstractmethod
def solver_tols(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@abc.abstractmethod
def export(self, f, **kwargs):
raise NotImplementedError(
"must define export in child class to use this base class"
)
@property
@abc.abstractmethod
def laytyp(self):
raise NotImplementedError(
"must define laytyp in child class to use this base class"
)
@property
@abc.abstractmethod
def hdry(self):
raise NotImplementedError(
"must define hdry in child class to use this base class"
)
@property
@abc.abstractmethod
def hnoflo(self):
raise NotImplementedError(
"must define hnoflo in child class to use this base class"
)
@property
@abc.abstractmethod
def laycbd(self):
raise NotImplementedError(
"must define laycbd in child class to use this base class"
)
@property
@abc.abstractmethod
def verbose(self):
raise NotImplementedError(
"must define verbose in child class to use this base class"
)
@abc.abstractmethod
def check(self, f=None, verbose=True, level=1):
raise NotImplementedError(
"must define check in child class to use this base class"
)
def get_package_list(self, ftype=None):
"""
Get a list of all the package names.
Parameters
----------
ftype : str
Type of package, 'RIV', 'LPF', etc.
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
"""
val = []
for pp in self.packagelist:
if ftype is None:
val.append(pp.name[0].upper())
elif pp.package_type.lower() == ftype:
val.append(pp.name[0].upper())
return val
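    # Usage sketch (hypothetical model ``m``; package names are
    # illustrative):
    #
    # >>> m.get_package_list()
    # ['DIS', 'BAS6', 'LPF', 'WEL', 'PCG']
    # >>> m.get_package_list(ftype="wel")
    # ['WEL']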
def _check(self, chk, level=1):
"""
Check model data for common errors.
Parameters
----------
        chk : check object
            model-level check instance created by the calling ``check``
            method
        level : int
            Check method analysis level. If level=0, summary checks are
            performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
results = {}
for p in self.packagelist:
if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
results[p.name[0]] = p.check(
f=None,
verbose=False,
level=level - 1,
checktype=chk.__class__,
)
# model level checks
# solver check
if self.version in chk.solver_packages.keys():
solvers = set(chk.solver_packages[self.version]).intersection(
set(self.get_package_list())
)
if not solvers:
chk._add_to_summary(
"Error", desc="\r No solver package", package="model"
)
elif len(list(solvers)) > 1:
for s in solvers:
chk._add_to_summary(
"Error",
desc="\r Multiple solver packages",
package=s,
)
else:
chk.passed.append("Compatible solver package")
# add package check results to model level check summary
for r in results.values():
if (
r is not None and r.summary_array is not None
): # currently SFR doesn't have one
chk.summary_array = np.append(
chk.summary_array, r.summary_array
).view(np.recarray)
chk.passed += [
f"{r.package.name[0]} package: {psd}" for psd in r.passed
]
chk.summarize()
return chk
class BaseModel(ModelInterface):
"""
MODFLOW-based models base class.
Parameters
----------
modelname : str, default "modflowtest"
Name of the model, which is also used for model file names.
namefile_ext : str, default "nam"
Name file extension, without "."
exe_name : str, default "mf2k.exe"
Name of the modflow executable.
model_ws : str, optional
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
structured : bool, default True
Specify if model grid is structured (default) or unstructured.
verbose : bool, default False
Print additional information to the screen.
**kwargs : dict, optional
Used to define: ``xll``/``yll`` for the x- and y-coordinates of
the lower-left corner of the grid, ``xul``/``yul`` for the
x- and y-coordinates of the upper-left corner of the grid
(deprecated), ``rotation`` for the grid rotation (default 0.0),
``proj4_str`` for a PROJ string, and ``start_datetime`` for
model start date (default "1-1-1970").
"""
def __init__(
self,
modelname="modflowtest",
namefile_ext="nam",
exe_name="mf2k.exe",
model_ws=None,
structured=True,
verbose=False,
**kwargs,
):
"""Initialize BaseModel."""
super().__init__()
self.__name = modelname
self.namefile_ext = namefile_ext or ""
self._namefile = self.__name + "." + self.namefile_ext
self._packagelist = []
self.heading = ""
self.exe_name = exe_name
self._verbose = verbose
self.external_path = None
self.external_extension = "ref"
if model_ws is None:
model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
except:
print(
f"\n{model_ws} not valid, "
f"workspace-folder was changed to {os.getcwd()}\n"
)
model_ws = os.getcwd()
self._model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ""
# check for reference info in kwargs
# we are just carrying these until a dis package is added
xll = kwargs.pop("xll", None)
yll = kwargs.pop("yll", None)
self._xul = kwargs.pop("xul", None)
self._yul = kwargs.pop("yul", None)
self._rotation = kwargs.pop("rotation", 0.0)
self._proj4_str = kwargs.pop("proj4_str", None)
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
# build model discretization objects
self._modelgrid = Grid(
proj4=self._proj4_str,
xoff=xll,
yoff=yll,
angrot=self._rotation,
)
self._modeltime = None
# Model file information
self.__onunit__ = 10
# external option stuff
self.array_free_format = True
self.free_format_input = True
self.parameter_load = False
self.array_format = None
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external_output = []
self.package_units = []
self._next_ext_unit = None
# output files
self.output_fnames = []
self.output_units = []
self.output_binflag = []
self.output_packages = []
return
@property
def modeltime(self):
raise NotImplementedError(
"must define modeltime in child class to use this base class"
)
@property
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
def packagelist(self):
return self._packagelist
@packagelist.setter
def packagelist(self, packagelist):
self._packagelist = packagelist
@property
def namefile(self):
return self._namefile
@namefile.setter
def namefile(self, namefile):
self._namefile = namefile
@property
def model_ws(self):
return self._model_ws
@model_ws.setter
def model_ws(self, model_ws):
self._model_ws = model_ws
@property
def exename(self):
return self._exename
@exename.setter
def exename(self, exename):
self._exename = exename
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, verbose):
self._verbose = verbose
@property
def laytyp(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").laytyp.array
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").laycon.array
if self.get_package("UPW") is not None:
return self.get_package("UPW").laytyp.array
return None
@property
def hdry(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").hdry
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").hdry
if self.get_package("UPW") is not None:
return self.get_package("UPW").hdry
return None
@property
def hnoflo(self):
try:
bas6 = self.get_package("BAS6")
return bas6.hnoflo
except AttributeError:
return None
@property
def laycbd(self):
try:
dis = self.get_package("DIS")
return dis.laycbd.array
except AttributeError:
return None
# we don't need these - no need for controlled access to array_free_format
# def set_free_format(self, value=True):
# """
# Set the free format flag for the model instance
#
# Parameters
# ----------
# value : bool
# Boolean value to set free format flag for model. (default is True)
#
# Returns
# -------
#
# """
# if not isinstance(value, bool):
# print('Error: set_free_format passed value must be a boolean')
# return False
# self.array_free_format = value
#
# def get_free_format(self):
# """
# Return the free format flag for the model
#
# Returns
# -------
# out : bool
# Free format flag for the model
#
# """
# return self.array_free_format
def next_unit(self, i=None):
if i is not None:
self.__onunit__ = i - 1
else:
self.__onunit__ += 1
return self.__onunit__
def next_ext_unit(self):
"""
Function to encapsulate next_ext_unit attribute
"""
next_unit = self._next_ext_unit + 1
self._next_ext_unit += 1
return next_unit
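    # Usage sketch (hypothetical model ``m``; the starting value is set
    # by the subclass, 1000 here is illustrative):
    #
    # >>> m._next_ext_unit = 1000
    # >>> m.next_ext_unit(), m.next_ext_unit()
    # (1001, 1002)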
def export(self, f, **kwargs):
"""
Method to export a model to netcdf or shapefile based on the
extension of the file name (.shp for shapefile, .nc for netcdf)
Parameters
----------
f : str
filename
kwargs : keyword arguments
modelgrid : flopy.discretization.Grid instance
user supplied modelgrid which can be used for exporting
in lieu of the modelgrid associated with the model object
Returns
-------
None or Netcdf object
"""
from .export import utils
return utils.model_export(f, self, **kwargs)
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
"""
for idx, u in enumerate(p.unit_number):
if u != 0:
if u in self.package_units or u in self.external_units:
try:
pn = p.name[idx]
except:
pn = p.name
if self.verbose:
print(
f"\nWARNING:\n unit {u} of package {pn} already in use."
)
self.package_units.append(u)
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif isinstance(p, type(pp)):
if self.verbose:
print(
"\nWARNING:\n Two packages of the same type, "
f"Replacing existing '{p.name[0]}' package."
)
self.packagelist[i] = p
return
if self.verbose:
print("adding Package: ", p.name[0])
self.packagelist.append(p)
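    # Behavior sketch (hypothetical ``wel1``/``wel2`` package instances):
    # adding a second package of a type already in the model replaces the
    # first, unless the existing package sets ``allowDuplicates``.
    #
    # >>> m.add_package(wel1)
    # >>> m.add_package(wel2)   # replaces wel1; warns when verbose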
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
"""
for i, pp in enumerate(self.packagelist):
if pname.upper() in pp.name:
if self.verbose:
print("removing Package: ", pp.name)
# Remove the package object from the model's packagelist
p = self.packagelist.pop(i)
# Remove the package unit number from the list of package
# units stored with the model
for iu in p.unit_number:
if iu in self.package_units:
self.package_units.remove(iu)
return
raise StopIteration(
"Package name " + pname + " not found in Package list"
)
def __getattr__(self, item):
"""
__getattr__ - syntactic sugar
Parameters
----------
item : str
3 character package name (case insensitive) or "sr" to access
the SpatialReference instance of the ModflowDis object
Returns
-------
sr : SpatialReference instance
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
Note
----
if self.dis is not None, then the spatial reference instance is updated
using self.dis.delr, self.dis.delc, and self.dis.lenuni before being
returned
"""
if item == "output_packages" or not hasattr(self, "output_packages"):
raise AttributeError(item)
if item == "tr":
if self.dis is not None:
return self.dis.tr
else:
return None
if item == "nper":
if self.dis is not None:
return self.dis.nper
else:
return 0
if item == "start_datetime":
if self.dis is not None:
return self.dis.start_datetime
else:
return None
# return self.get_package(item)
# to avoid infinite recursion
if item == "_packagelist" or item == "packagelist":
raise AttributeError(item)
pckg = self.get_package(item)
if pckg is not None or item in self.mfnam_packages:
return pckg
if item == "modelgrid":
return
raise AttributeError(item)
def get_ext_dict_attr(
self, ext_unit_dict=None, unit=None, filetype=None, pop_key=True
):
iu = None
fname = None
if ext_unit_dict is not None:
for key, value in ext_unit_dict.items():
if key == unit:
iu = key
fname = os.path.basename(value.filename)
break
elif value.filetype == filetype:
iu = key
fname = os.path.basename(value.filename)
if pop_key:
self.add_pop_key_list(iu)
break
return iu, fname
def _output_msg(self, i, add=True):
if add:
txt1 = "Adding"
txt2 = "to"
else:
txt1 = "Removing"
txt2 = "from"
print(
f"{txt1} {self.output_fnames[i]} (unit={self.output_units[i]}) "
f"{txt2} the output list."
)
def add_output_file(
self, unit, fname=None, extension="cbc", binflag=True, package=None
):
"""
Add an ascii or binary output file for a package
Parameters
----------
unit : int
unit number of external array
fname : str
filename of external array. (default is None)
extension : str
extension to use for the cell-by-cell file. Only used if fname
is None. (default is cbc)
binflag : bool
boolean flag indicating if the output file is a binary file.
Default is True
package : str
string that defines the package the output file is attached to.
Default is None
"""
add_cbc = False
if unit > 0:
add_cbc = True
# determine if the file is in external_units
if abs(unit) in self.external_units:
idx = self.external_units.index(abs(unit))
if fname is None:
fname = os.path.basename(self.external_fnames[idx])
binflag = self.external_binflag[idx]
self.remove_external(unit=abs(unit))
# determine if the unit exists in the output data
if abs(unit) in self.output_units:
add_cbc = False
idx = self.output_units.index(abs(unit))
# determine if binflag has changed
if binflag is not self.output_binflag[idx]:
add_cbc = True
if add_cbc:
self.remove_output(unit=abs(unit))
else:
if package is not None:
self.output_packages[idx].append(package)
if add_cbc:
if fname is None:
fname = f"{self.name}.{extension}"
# check if this file name exists for a different unit number
if fname in self.output_fnames:
idx = self.output_fnames.index(fname)
iut = self.output_units[idx]
if iut != unit:
# include unit number in fname if package has
# not been passed
if package is None:
fname = f"{self.name}.{unit}.{extension}"
# include package name in fname
else:
fname = f"{self.name}.{package}.{extension}"
else:
fname = os.path.basename(fname)
self.add_output(fname, unit, binflag=binflag, package=package)
return
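    # Usage sketch (hypothetical model ``m``): register a binary
    # cell-by-cell budget file for a WEL package; the file name is
    # generated from the model name because fname is None.
    #
    # >>> m.add_output_file(53, extension="cbc", package="WEL")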
def add_output(self, fname, unit, binflag=False, package=None):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
        binflag : boolean
            binary or not. (default is False)
        package : str, optional
            name of the package to associate with this output file.
            (default is None)
        """
if fname in self.output_fnames:
if self.verbose:
print(
"BaseModel.add_output() warning: "
f"replacing existing filename {fname}"
)
idx = self.output_fnames.index(fname)
if self.verbose:
self._output_msg(idx, add=False)
self.output_fnames.pop(idx)
self.output_units.pop(idx)
self.output_binflag.pop(idx)
self.output_packages.pop(idx)
self.output_fnames.append(fname)
self.output_units.append(unit)
self.output_binflag.append(binflag)
if package is not None:
self.output_packages.append([package])
else:
self.output_packages.append([])
if self.verbose:
self._output_msg(-1, add=True)
return
def remove_output(self, fname=None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
else:
msg = " either fname or unit must be passed to remove_output()"
raise Exception(msg)
return
def get_output(self, fname=None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
return self.output_units[i]
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
return self.output_fnames[i]
return None
else:
msg = " either fname or unit must be passed to get_output()"
raise Exception(msg)
return
def set_output_attribute(self, fname=None, unit=None, attr=None):
"""
        Set an attribute of an output file from the model by specifying either
the file name or the unit number and a dictionary with attributes
to change.
Parameters
----------
fname : str
filename of output array
        unit : int
            unit number of output array
        attr : dict
            dictionary of output attributes to change; recognized keys
            are "binflag", "fname", and "unit"
        """
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
msg = (
" either fname or unit must be passed "
"to set_output_attribute()"
)
raise Exception(msg)
if attr is not None:
if idx is not None:
                for key, value in attr.items():
if key == "binflag":
self.output_binflag[idx] = value
elif key == "fname":
self.output_fnames[idx] = value
elif key == "unit":
self.output_units[idx] = value
return
def get_output_attribute(self, fname=None, unit=None, attr=None):
"""
        Get an attribute of an output file from the model by specifying either
the file name or the unit number.
Parameters
----------
fname : str
filename of output array
        unit : int
            unit number of output array
        attr : str
            name of the attribute to return; one of "binflag",
            "fname", or "unit"
        """
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
" either fname or unit must be passed "
"to set_output_attribute()"
)
v = None
if attr is not None:
if idx is not None:
if attr == "binflag":
v = self.output_binflag[idx]
elif attr == "fname":
v = self.output_fnames[idx]
elif attr == "unit":
v = self.output_units[idx]
return v
def add_external(self, fname, unit, binflag=False, output=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
        binflag : boolean
            binary or not. (default is False)
        output : boolean
            flag indicating whether the file is a model output file.
            (default is False)
        """
if fname in self.external_fnames:
if self.verbose:
print(
"BaseModel.add_external() warning: "
f"replacing existing filename {fname}"
)
idx = self.external_fnames.index(fname)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
if unit in self.external_units:
if self.verbose:
msg = f"BaseModel.add_external() warning: replacing existing unit {unit}"
print(msg)
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
self.external_output.append(output)
return
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
"""
plist = []
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
plist.append(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
plist.append(i)
else:
msg = " either fname or unit must be passed to remove_external()"
raise Exception(msg)
# remove external file
j = 0
for i in plist:
ipos = i - j
self.external_fnames.pop(ipos)
self.external_units.pop(ipos)
self.external_binflag.pop(ipos)
self.external_output.pop(ipos)
j += 1
return
def add_existing_package(
self, filename, ptype=None, copy_to_model_ws=True
):
"""
Add an existing package to a model instance.
Parameters
----------
filename : str
the name of the file to add as a package
ptype : optional
the model package type (e.g. "lpf", "wel", etc). If None,
then the file extension of the filename arg is used
copy_to_model_ws : bool
flag to copy the package file into the model_ws directory.
Returns
-------
None
"""
if ptype is None:
ptype = filename.split(".")[-1]
ptype = str(ptype).upper()
# for pak in self.packagelist:
# if ptype in pak.name:
# print("BaseModel.add_existing_package() warning: " +\
# "replacing existing package {0}".format(ptype))
class Obj:
pass
fake_package = Obj()
fake_package.write_file = lambda: None
fake_package.name = [ptype]
fake_package.extension = [filename.split(".")[-1]]
fake_package.unit_number = [self.next_ext_unit()]
if copy_to_model_ws:
base_filename = os.path.split(filename)[-1]
fake_package.file_name = [base_filename]
shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
else:
fake_package.file_name = [filename]
fake_package.allowDuplicates = True
self.add_package(fake_package)
def get_name_file_entries(self):
"""
Get a string representation of the name file.
        Returns
        -------
        str
            string representation of the name file
        """
lines = []
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s = f"{p.name[i]:14s} {p.unit_number[i]:5d} {p.file_name[i]}"
lines.append(s)
return "\n".join(lines) + "\n"
def has_package(self, name):
"""
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
True if package name exists, otherwise False if not found.
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for p in self.packagelist:
for pn in p.name:
if pn.upper() == name:
return True
return False
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for pp in self.packagelist:
if pp.name[0].upper() == name:
return pp
return None
def set_version(self, version):
self.version = version.lower()
# check that this is a valid model version
if self.version not in list(self.version_types.keys()):
err = (
f"Error: Unsupported model version ({self.version}). "
"Valid model versions are:"
)
for v in list(self.version_types.keys()):
err += f" {v}"
raise Exception(err)
# set namefile heading
self.heading = (
f"# Name file for {self.version_types[self.version]}, "
f"generated by Flopy version {__version__}."
)
# set heading for each package
for p in self.get_package_list():
pak = self.get_package(p)
if hasattr(pak, "heading"):
pak._generate_heading()
return None
def change_model_ws(self, new_pth=None, reset_external=False):
"""
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
the present working directory).
Returns
-------
        None
"""
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
print(f"\ncreating model workspace...\n {new_pth}")
os.makedirs(new_pth)
except:
raise OSError(f"{new_pth} not valid, workspace-folder")
# line = '\n{} not valid, workspace-folder '.format(new_pth) + \
# 'was changed to {}\n'.format(os.getcwd())
# print(line)
# new_pth = os.getcwd()
# --reset the model workspace
old_pth = self._model_ws
self._model_ws = new_pth
if self.verbose:
print(f"\nchanging model workspace...\n {new_pth}")
# reset the paths for each package
for pp in self.packagelist:
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
# create the external path (if needed)
if (
hasattr(self, "external_path")
and self.external_path is not None
and not os.path.exists(
os.path.join(self._model_ws, self.external_path)
)
):
pth = os.path.join(self._model_ws, self.external_path)
os.makedirs(pth)
if reset_external:
self._reset_external(pth, old_pth)
elif reset_external:
self._reset_external(self._model_ws, old_pth)
return None
def _reset_external(self, pth, old_pth):
new_ext_fnames = []
for ext_file, output in zip(
self.external_fnames, self.external_output
):
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
# this is a wicked mess
if output:
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
new_ext_file = ext_file
else:
# fpth = os.path.abspath(os.path.join(old_pth, ext_file))
# new_ext_file = os.path.relpath(fpth, os.path.abspath(pth))
fdir = os.path.dirname(ext_file)
if fdir == "":
fpth = os.path.abspath(os.path.join(old_pth, ext_file))
else:
fpth = ext_file
ao = os.path.abspath(os.path.dirname(fpth))
ep = os.path.abspath(pth)
relp = os.path.relpath(ao, ep)
new_ext_file = os.path.join(relp, os.path.basename(ext_file))
new_ext_fnames.append(new_ext_file)
self.external_fnames = new_ext_fnames
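    # Standalone illustration of the relative-path rewrite above (POSIX
    # separators shown; directory names are hypothetical):
    #
    # >>> import os
    # >>> old = os.path.abspath("old_ws")
    # >>> new = os.path.abspath("old_ws/sub")
    # >>> os.path.join(os.path.relpath(old, new), "arr.ref")
    # '../arr.ref'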
@property
def model_ws(self):
return copy.deepcopy(self._model_ws)
def _set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = str(value)
self.namefile = self.__name + "." + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + "." + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
def __setattr__(self, key, value):
if key == "free_format_input":
# if self.bas6 is not None:
# self.bas6.ifrefm = value
super().__setattr__(key, value)
elif key == "name":
self._set_name(value)
elif key == "model_ws":
self.change_model_ws(value)
elif key == "sr" and value.__class__.__name__ == "SpatialReference":
warnings.warn(
"SpatialReference has been deprecated.",
category=DeprecationWarning,
)
if self.dis is not None:
self.dis.sr = value
else:
raise Exception(
"cannot set SpatialReference - ModflowDis not found"
)
elif key == "tr":
assert isinstance(
value, discretization.reference.TemporalReference
)
if self.dis is not None:
self.dis.tr = value
else:
raise Exception(
"cannot set TemporalReference - ModflowDis not found"
)
elif key == "start_datetime":
if self.dis is not None:
self.dis.start_datetime = value
self.tr.start_datetime = value
else:
raise Exception(
"cannot set start_datetime - ModflowDis not found"
)
else:
super().__setattr__(key, value)
def run_model(
self,
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
):
"""
This method will run the model using subprocess.Popen.
Parameters
----------
        silent : boolean
            If True, suppress echo of run information to the screen.
            (default is False)
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
            Save stdout lines to a list (buff) which is returned
            by the method. (default is False)
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
return run_model(
self.exe_name,
self.namefile,
model_ws=self.model_ws,
silent=silent,
pause=pause,
report=report,
normal_msg=normal_msg,
)
def load_results(self):
print("load_results not implemented")
return None
def write_input(self, SelPackList=False, check=False):
"""
Write the input.
Parameters
----------
        SelPackList : False or list of str
            List of package names to write. If False (default), all
            packages are written.
        check : boolean
            flag to run the model-level check prior to writing input.
            (default is False)
        """
if check:
# run check prior to writing input
self.check(f=f"{self.name}.chk", verbose=self.verbose, level=1)
# reset the model to free_format if parameter substitution was
# performed on a model load
if self.parameter_load and not self.free_format_input:
if self.verbose:
print(
"\nResetting free_format_input to True to "
"preserve the precision of the parameter data."
)
self.free_format_input = True
if self.verbose:
print("\nWriting packages:")
if SelPackList == False:
for p in self.packagelist:
if self.verbose:
print(" Package: ", p.name[0])
# prevent individual package checks from running after
# model-level package check above
# otherwise checks are run twice
# or the model level check procedure would have to be split up
# or each package would need a check argument,
# or default for package level check would have to be False
try:
p.write_file(check=False)
except TypeError:
p.write_file()
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(" Package: ", p.name[0])
try:
p.write_file(check=False)
except TypeError:
p.write_file()
break
if self.verbose:
print(" ")
# write name file
self.write_name_file()
# os.chdir(org_dir)
return
def write_name_file(self):
"""
        Every model needs its own write_name_file method
        """
        raise Exception(
            "IMPLEMENTATION ERROR: write_name_file must be overloaded"
)
def set_model_units(self):
"""
Every model needs its own set_model_units method
"""
raise Exception(
"IMPLEMENTATION ERROR: set_model_units must be overloaded"
)
@property
def name(self):
"""
Get model name
Returns
-------
name : str
name of model
"""
return copy.deepcopy(self.__name)
def add_pop_key_list(self, key):
"""
        Add an external file unit number to a list that will be used to remove
model output (typically binary) files from ext_unit_dict.
Parameters
----------
key : int
file unit number
Returns
-------
Examples
--------
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
chk = utils.check(self, f=f, verbose=verbose, level=level)
# check for unit number conflicts
package_units = {}
duplicate_units = {}
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] != 0:
if p.unit_number[i] in package_units.values():
duplicate_units[p.name[i]] = p.unit_number[i]
otherpackage = [
k
for k, v in package_units.items()
if v == p.unit_number[i]
][0]
                        duplicate_units[otherpackage] = p.unit_number[i]
                    package_units[p.name[i]] = p.unit_number[i]
if len(duplicate_units) > 0:
for k, v in duplicate_units.items():
chk._add_to_summary(
"Error", package=k, value=v, desc="unit number conflict"
)
else:
chk.passed.append("Unit number conflicts")
return self._check(chk, level)
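    # Usage sketch (hypothetical model ``m``): run the model-level check
    # and write the summary to a file next to the model.
    #
    # >>> m.check(f="model.chk", verbose=False, level=1)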
# MASKED: plot function (lines 1541-1592)
def to_shapefile(self, filename, package_names=None, **kwargs):
"""
Wrapper function for writing a shapefile for the model grid. If
package_names is not None, then search through the requested packages
looking for arrays that can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
        >>> m.to_shapefile('model.shp', package_names=['dis', 'lpf'])
"""
warnings.warn("to_shapefile() is deprecated. use .export()")
self.export(filename, package_names=package_names)
return
def run_model(
exe_name,
namefile,
model_ws="./",
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
use_async=False,
cargs=None,
):
"""
This function will run the model using subprocess.Popen. It
communicates with the model's stdout asynchronously and reports
progress to the screen with timestamps
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
Namefile of model to run. The namefile must be the
filename of the namefile without the path. Namefile can be None
to allow programs that do not require a control file (name file)
to be passed as a command line argument.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
    silent : boolean
        If True, suppress echo of run information to the screen.
        (default is False)
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
        Save stdout lines to a list (buff) which is returned
        by the method. (default is False)
normal_msg : str or list
Normal termination message used to determine if the
run terminated normally. More than one message can be provided using
a list. (Default is 'normal termination')
    use_async : boolean
        If True, asynchronously read model stdout and report with
        timestamps. Useful for models that take a long time to run,
        but not for models that run quickly.
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
success = False
buff = []
# convert normal_msg to a list of lower case str for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg]
for idx, s in enumerate(normal_msg):
normal_msg[idx] = s.lower()
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
import platform
if platform.system() in "Windows":
if not exe_name.lower().endswith(".exe"):
exe = which(exe_name + ".exe")
elif exe_name.lower().endswith(".exe"):
exe = which(exe_name[:-4])
if exe is None:
raise Exception(
f"The program {exe_name} does not exist or is not executable."
)
else:
if not silent:
print(
f"FloPy is using the following executable to run the model: {exe}"
)
if namefile is not None:
if not os.path.isfile(os.path.join(model_ws, namefile)):
raise Exception(
f"The namefile for this model does not exists: {namefile}"
)
# simple little function for the thread to target
def q_output(output, q):
for line in iter(output.readline, b""):
q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe_name]
if namefile is not None:
argv.append(namefile)
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
if not use_async:
while True:
line = proc.stdout.readline().decode("utf-8")
if line == "" and proc.poll() is not None:
break
if line:
for msg in normal_msg:
if msg in line.lower():
success = True
break
line = line.rstrip("\r\n")
if not silent:
print(line)
if report:
buff.append(line)
else:
break
return success, buff
# some tricks for the async stdout reading
q = Queue.Queue()
thread = threading.Thread(target=q_output, args=(proc.stdout, q))
thread.daemon = True
thread.start()
failed_words = ["fail", "error"]
last = datetime.now()
lastsec = 0.0
while True:
try:
line = q.get_nowait()
except Queue.Empty:
pass
else:
if line == "":
break
line = line.decode().lower().strip()
if line != "":
now = datetime.now()
dt = now - last
tsecs = dt.total_seconds() - lastsec
line = f"(elapsed:{tsecs})-->{line}"
lastsec = tsecs + lastsec
buff.append(line)
if not silent:
print(line)
for fword in failed_words:
if fword in line:
success = False
break
if proc.poll() is not None:
break
proc.wait()
thread.join(timeout=1)
    # decode the remaining raw bytes so buff stays a list of str
    buff.extend(line.decode() for line in proc.stdout.readlines())
proc.stdout.close()
for line in buff:
for msg in normal_msg:
if msg in line.lower():
print("success")
success = True
break
if pause:
input("Press Enter to continue...")
return success, buff
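# A hypothetical usage sketch (not part of the original source), assuming a
# MODFLOW-2005 executable named 'mf2005' on the PATH and a workspace
# './model' containing 'model.nam':
#
# >>> success, buff = run_model('mf2005', 'model.nam', model_ws='./model',
# ...                           silent=True, report=True)
# >>> if not success:
# ...     print('\n'.join(buff))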
|
def plot(self, SelPackList=None, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
model input data
Parameters
----------
SelPackList : bool or list
            List of packages to plot. If SelPackList=None all packages
are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
                MODFLOW zero-based layer number to return. If None, then
all layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return.
(default is zero)
key : str
MfList dictionary key. (default is None)
Returns
        -------
axes : list
Empty list is returned if filename_base is not None. Otherwise
            a list of matplotlib.pyplot.axis objects is returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_model_helper(
self, SelPackList=SelPackList, **kwargs
)
return axes
| 1,541
| 1,592
|
"""
mbase module
This module contains the base model class from which
all other models inherit.
"""
import abc
import os
import shutil
import threading
import warnings
import queue as Queue
from datetime import datetime
from shutil import which
from subprocess import Popen, PIPE, STDOUT
import copy
import numpy as np
from flopy import utils, discretization
from .version import __version__
from .discretization.grid import Grid
## Global variables
# Multiplier for individual array elements in integer and real arrays read by
# MODFLOW's U2DREL, U1DREL and U2DINT.
iconst = 1
# Printout flag. If >= 0 then array values read are printed in listing file.
iprn = -1
# external exceptions for users
class PackageLoadException(Exception):
"""
FloPy package load exception.
"""
def __init__(self, error, location=""):
"""Initialize exception."""
self.message = error
super().__init__(f"{error} ({location})")
class FileDataEntry:
def __init__(self, fname, unit, binflag=False, output=False, package=None):
self.fname = fname
self.unit = unit
self.binflag = binflag
self.output = output
self.package = package
class FileData:
def __init__(self):
self.file_data = []
return
def add_file(self, fname, unit, binflag=False, output=False, package=None):
ipop = []
for idx, file_data in enumerate(self.file_data):
if file_data.fname == fname or file_data.unit == unit:
ipop.append(idx)
self.file_data.append(
FileDataEntry(
fname, unit, binflag=binflag, output=output, package=package
)
)
return
class ModelInterface:
def __init__(self):
self._mg_resync = True
self._modelgrid = None
def update_modelgrid(self):
if self._modelgrid is not None:
self._modelgrid = Grid(
proj4=self._modelgrid.proj4,
xoff=self._modelgrid.xoffset,
yoff=self._modelgrid.yoffset,
angrot=self._modelgrid.angrot,
)
self._mg_resync = True
@property
@abc.abstractmethod
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
@abc.abstractmethod
def packagelist(self):
raise NotImplementedError(
"must define packagelist in child class to use this base class"
)
@property
@abc.abstractmethod
def namefile(self):
raise NotImplementedError(
"must define namefile in child class to use this base class"
)
@property
@abc.abstractmethod
def model_ws(self):
raise NotImplementedError(
"must define model_ws in child class to use this base class"
)
@property
@abc.abstractmethod
def exename(self):
raise NotImplementedError(
"must define exename in child class to use this base class"
)
@property
@abc.abstractmethod
def version(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@property
@abc.abstractmethod
def solver_tols(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@abc.abstractmethod
def export(self, f, **kwargs):
raise NotImplementedError(
"must define export in child class to use this base class"
)
@property
@abc.abstractmethod
def laytyp(self):
raise NotImplementedError(
"must define laytyp in child class to use this base class"
)
@property
@abc.abstractmethod
def hdry(self):
raise NotImplementedError(
"must define hdry in child class to use this base class"
)
@property
@abc.abstractmethod
def hnoflo(self):
raise NotImplementedError(
"must define hnoflo in child class to use this base class"
)
@property
@abc.abstractmethod
def laycbd(self):
raise NotImplementedError(
"must define laycbd in child class to use this base class"
)
@property
@abc.abstractmethod
def verbose(self):
raise NotImplementedError(
"must define verbose in child class to use this base class"
)
@abc.abstractmethod
def check(self, f=None, verbose=True, level=1):
raise NotImplementedError(
"must define check in child class to use this base class"
)
def get_package_list(self, ftype=None):
"""
Get a list of all the package names.
Parameters
----------
ftype : str
Type of package, 'RIV', 'LPF', etc.
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
"""
val = []
for pp in self.packagelist:
if ftype is None:
val.append(pp.name[0].upper())
elif pp.package_type.lower() == ftype:
val.append(pp.name[0].upper())
return val
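    # A hypothetical usage sketch (not part of the original source), assuming
    # a loaded model `m` with DIS, BAS6, and LPF packages:
    #
    # >>> m.get_package_list()
    # ['DIS', 'BAS6', 'LPF']
    # >>> m.get_package_list(ftype='lpf')
    # ['LPF']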
def _check(self, chk, level=1):
"""
Check model data for common errors.
Parameters
----------
        chk : check object
            check instance to which model- and package-level results are
            appended
        level : int
            Check method analysis level. If level=0, summary checks are
            performed. If level=1, full checks are performed.
        Returns
        -------
        chk : check object
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
results = {}
for p in self.packagelist:
if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
results[p.name[0]] = p.check(
f=None,
verbose=False,
level=level - 1,
checktype=chk.__class__,
)
# model level checks
# solver check
if self.version in chk.solver_packages.keys():
solvers = set(chk.solver_packages[self.version]).intersection(
set(self.get_package_list())
)
if not solvers:
chk._add_to_summary(
"Error", desc="\r No solver package", package="model"
)
elif len(list(solvers)) > 1:
for s in solvers:
chk._add_to_summary(
"Error",
desc="\r Multiple solver packages",
package=s,
)
else:
chk.passed.append("Compatible solver package")
# add package check results to model level check summary
for r in results.values():
if (
r is not None and r.summary_array is not None
): # currently SFR doesn't have one
chk.summary_array = np.append(
chk.summary_array, r.summary_array
).view(np.recarray)
chk.passed += [
f"{r.package.name[0]} package: {psd}" for psd in r.passed
]
chk.summarize()
return chk
class BaseModel(ModelInterface):
"""
MODFLOW-based models base class.
Parameters
----------
modelname : str, default "modflowtest"
Name of the model, which is also used for model file names.
namefile_ext : str, default "nam"
Name file extension, without "."
exe_name : str, default "mf2k.exe"
Name of the modflow executable.
model_ws : str, optional
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
structured : bool, default True
Specify if model grid is structured (default) or unstructured.
verbose : bool, default False
Print additional information to the screen.
**kwargs : dict, optional
Used to define: ``xll``/``yll`` for the x- and y-coordinates of
the lower-left corner of the grid, ``xul``/``yul`` for the
x- and y-coordinates of the upper-left corner of the grid
(deprecated), ``rotation`` for the grid rotation (default 0.0),
``proj4_str`` for a PROJ string, and ``start_datetime`` for
model start date (default "1-1-1970").
"""
def __init__(
self,
modelname="modflowtest",
namefile_ext="nam",
exe_name="mf2k.exe",
model_ws=None,
structured=True,
verbose=False,
**kwargs,
):
"""Initialize BaseModel."""
super().__init__()
self.__name = modelname
self.namefile_ext = namefile_ext or ""
self._namefile = self.__name + "." + self.namefile_ext
self._packagelist = []
self.heading = ""
self.exe_name = exe_name
self._verbose = verbose
self.external_path = None
self.external_extension = "ref"
if model_ws is None:
model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
            except OSError:
print(
f"\n{model_ws} not valid, "
f"workspace-folder was changed to {os.getcwd()}\n"
)
model_ws = os.getcwd()
self._model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ""
# check for reference info in kwargs
# we are just carrying these until a dis package is added
xll = kwargs.pop("xll", None)
yll = kwargs.pop("yll", None)
self._xul = kwargs.pop("xul", None)
self._yul = kwargs.pop("yul", None)
self._rotation = kwargs.pop("rotation", 0.0)
self._proj4_str = kwargs.pop("proj4_str", None)
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
# build model discretization objects
self._modelgrid = Grid(
proj4=self._proj4_str,
xoff=xll,
yoff=yll,
angrot=self._rotation,
)
self._modeltime = None
# Model file information
self.__onunit__ = 10
# external option stuff
self.array_free_format = True
self.free_format_input = True
self.parameter_load = False
self.array_format = None
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external_output = []
self.package_units = []
self._next_ext_unit = None
# output files
self.output_fnames = []
self.output_units = []
self.output_binflag = []
self.output_packages = []
return
@property
def modeltime(self):
raise NotImplementedError(
"must define modeltime in child class to use this base class"
)
@property
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
def packagelist(self):
return self._packagelist
@packagelist.setter
def packagelist(self, packagelist):
self._packagelist = packagelist
@property
def namefile(self):
return self._namefile
@namefile.setter
def namefile(self, namefile):
self._namefile = namefile
@property
def model_ws(self):
return self._model_ws
@model_ws.setter
def model_ws(self, model_ws):
self._model_ws = model_ws
@property
def exename(self):
return self._exename
@exename.setter
def exename(self, exename):
self._exename = exename
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, verbose):
self._verbose = verbose
@property
def laytyp(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").laytyp.array
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").laycon.array
if self.get_package("UPW") is not None:
return self.get_package("UPW").laytyp.array
return None
@property
def hdry(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").hdry
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").hdry
if self.get_package("UPW") is not None:
return self.get_package("UPW").hdry
return None
@property
def hnoflo(self):
try:
bas6 = self.get_package("BAS6")
return bas6.hnoflo
except AttributeError:
return None
@property
def laycbd(self):
try:
dis = self.get_package("DIS")
return dis.laycbd.array
except AttributeError:
return None
# we don't need these - no need for controlled access to array_free_format
# def set_free_format(self, value=True):
# """
# Set the free format flag for the model instance
#
# Parameters
# ----------
# value : bool
# Boolean value to set free format flag for model. (default is True)
#
# Returns
# -------
#
# """
# if not isinstance(value, bool):
# print('Error: set_free_format passed value must be a boolean')
# return False
# self.array_free_format = value
#
# def get_free_format(self):
# """
# Return the free format flag for the model
#
# Returns
# -------
# out : bool
# Free format flag for the model
#
# """
# return self.array_free_format
def next_unit(self, i=None):
if i is not None:
self.__onunit__ = i - 1
else:
self.__onunit__ += 1
return self.__onunit__
def next_ext_unit(self):
"""
Function to encapsulate next_ext_unit attribute
"""
next_unit = self._next_ext_unit + 1
self._next_ext_unit += 1
return next_unit
def export(self, f, **kwargs):
"""
Method to export a model to netcdf or shapefile based on the
extension of the file name (.shp for shapefile, .nc for netcdf)
Parameters
----------
f : str
filename
kwargs : keyword arguments
modelgrid : flopy.discretization.Grid instance
user supplied modelgrid which can be used for exporting
in lieu of the modelgrid associated with the model object
Returns
-------
None or Netcdf object
"""
from .export import utils
return utils.model_export(f, self, **kwargs)
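    # A hypothetical usage sketch (not part of the original source): the file
    # extension selects the exporter, so '.shp' writes a shapefile and '.nc'
    # writes netCDF:
    #
    # >>> m.export('model.shp')
    # >>> nc = m.export('model.nc')  # returns a NetCdf object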
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
"""
for idx, u in enumerate(p.unit_number):
if u != 0:
if u in self.package_units or u in self.external_units:
try:
pn = p.name[idx]
                    except (IndexError, TypeError):
pn = p.name
if self.verbose:
print(
f"\nWARNING:\n unit {u} of package {pn} already in use."
)
self.package_units.append(u)
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif isinstance(p, type(pp)):
if self.verbose:
print(
"\nWARNING:\n Two packages of the same type, "
f"Replacing existing '{p.name[0]}' package."
)
self.packagelist[i] = p
return
if self.verbose:
print("adding Package: ", p.name[0])
self.packagelist.append(p)
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
"""
for i, pp in enumerate(self.packagelist):
if pname.upper() in pp.name:
if self.verbose:
print("removing Package: ", pp.name)
# Remove the package object from the model's packagelist
p = self.packagelist.pop(i)
# Remove the package unit number from the list of package
# units stored with the model
for iu in p.unit_number:
if iu in self.package_units:
self.package_units.remove(iu)
return
        raise ValueError(
            "Package name " + pname + " not found in Package list"
        )
def __getattr__(self, item):
"""
__getattr__ - syntactic sugar
Parameters
----------
item : str
3 character package name (case insensitive) or "sr" to access
the SpatialReference instance of the ModflowDis object
Returns
-------
sr : SpatialReference instance
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
Note
----
if self.dis is not None, then the spatial reference instance is updated
using self.dis.delr, self.dis.delc, and self.dis.lenuni before being
returned
"""
if item == "output_packages" or not hasattr(self, "output_packages"):
raise AttributeError(item)
if item == "tr":
if self.dis is not None:
return self.dis.tr
else:
return None
if item == "nper":
if self.dis is not None:
return self.dis.nper
else:
return 0
if item == "start_datetime":
if self.dis is not None:
return self.dis.start_datetime
else:
return None
# return self.get_package(item)
# to avoid infinite recursion
if item == "_packagelist" or item == "packagelist":
raise AttributeError(item)
pckg = self.get_package(item)
if pckg is not None or item in self.mfnam_packages:
return pckg
if item == "modelgrid":
return
raise AttributeError(item)
def get_ext_dict_attr(
self, ext_unit_dict=None, unit=None, filetype=None, pop_key=True
):
iu = None
fname = None
if ext_unit_dict is not None:
for key, value in ext_unit_dict.items():
if key == unit:
iu = key
fname = os.path.basename(value.filename)
break
elif value.filetype == filetype:
iu = key
fname = os.path.basename(value.filename)
if pop_key:
self.add_pop_key_list(iu)
break
return iu, fname
def _output_msg(self, i, add=True):
if add:
txt1 = "Adding"
txt2 = "to"
else:
txt1 = "Removing"
txt2 = "from"
print(
f"{txt1} {self.output_fnames[i]} (unit={self.output_units[i]}) "
f"{txt2} the output list."
)
def add_output_file(
self, unit, fname=None, extension="cbc", binflag=True, package=None
):
"""
Add an ascii or binary output file for a package
Parameters
----------
unit : int
unit number of external array
fname : str
filename of external array. (default is None)
extension : str
extension to use for the cell-by-cell file. Only used if fname
is None. (default is cbc)
binflag : bool
boolean flag indicating if the output file is a binary file.
Default is True
package : str
string that defines the package the output file is attached to.
Default is None
"""
add_cbc = False
if unit > 0:
add_cbc = True
# determine if the file is in external_units
if abs(unit) in self.external_units:
idx = self.external_units.index(abs(unit))
if fname is None:
fname = os.path.basename(self.external_fnames[idx])
binflag = self.external_binflag[idx]
self.remove_external(unit=abs(unit))
# determine if the unit exists in the output data
if abs(unit) in self.output_units:
add_cbc = False
idx = self.output_units.index(abs(unit))
# determine if binflag has changed
if binflag is not self.output_binflag[idx]:
add_cbc = True
if add_cbc:
self.remove_output(unit=abs(unit))
else:
if package is not None:
self.output_packages[idx].append(package)
if add_cbc:
if fname is None:
fname = f"{self.name}.{extension}"
# check if this file name exists for a different unit number
if fname in self.output_fnames:
idx = self.output_fnames.index(fname)
iut = self.output_units[idx]
if iut != unit:
# include unit number in fname if package has
# not been passed
if package is None:
fname = f"{self.name}.{unit}.{extension}"
# include package name in fname
else:
fname = f"{self.name}.{package}.{extension}"
else:
fname = os.path.basename(fname)
self.add_output(fname, unit, binflag=binflag, package=package)
return
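    # A hypothetical usage sketch (not part of the original source): attach a
    # binary cell-by-cell budget file on unit 53 for an assumed LPF package;
    # the file name defaults to '<model name>.cbc':
    #
    # >>> m.add_output_file(53, binflag=True, package='LPF')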
def add_output(self, fname, unit, binflag=False, package=None):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.output_fnames:
if self.verbose:
print(
"BaseModel.add_output() warning: "
f"replacing existing filename {fname}"
)
idx = self.output_fnames.index(fname)
if self.verbose:
self._output_msg(idx, add=False)
self.output_fnames.pop(idx)
self.output_units.pop(idx)
self.output_binflag.pop(idx)
self.output_packages.pop(idx)
self.output_fnames.append(fname)
self.output_units.append(unit)
self.output_binflag.append(binflag)
if package is not None:
self.output_packages.append([package])
else:
self.output_packages.append([])
if self.verbose:
self._output_msg(-1, add=True)
return
def remove_output(self, fname=None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
        if fname is None and unit is None:
            msg = " either fname or unit must be passed to remove_output()"
            raise Exception(msg)
        # collect matching indices first; popping while iterating over the
        # same list would skip entries
        if fname is not None:
            plist = [i for i, e in enumerate(self.output_fnames) if fname in e]
        else:
            plist = [i for i, u in enumerate(self.output_units) if u == unit]
        for i in reversed(plist):
            if self.verbose:
                self._output_msg(i, add=False)
            self.output_fnames.pop(i)
            self.output_units.pop(i)
            self.output_binflag.pop(i)
            self.output_packages.pop(i)
        return
def get_output(self, fname=None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
return self.output_units[i]
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
return self.output_fnames[i]
return None
else:
msg = " either fname or unit must be passed to get_output()"
raise Exception(msg)
return
def set_output_attribute(self, fname=None, unit=None, attr=None):
"""
Set a variable in an output file from the model by specifying either
the file name or the unit number and a dictionary with attributes
to change.
Parameters
----------
fname : str
filename of output array
        unit : int
            unit number of output array
        attr : dict
            dictionary of attribute names ('binflag', 'fname', or 'unit')
            and the new values to assign
        """
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
msg = (
" either fname or unit must be passed "
"to set_output_attribute()"
)
raise Exception(msg)
if attr is not None:
if idx is not None:
                for key, value in attr.items():
if key == "binflag":
self.output_binflag[idx] = value
elif key == "fname":
self.output_fnames[idx] = value
elif key == "unit":
self.output_units[idx] = value
return
def get_output_attribute(self, fname=None, unit=None, attr=None):
"""
        Get an attribute for an output file from the model by specifying either
the file name or the unit number.
Parameters
----------
fname : str
filename of output array
        unit : int
            unit number of output array
        attr : str
            name of the attribute to return ('binflag', 'fname', or 'unit')
        """
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
" either fname or unit must be passed "
"to set_output_attribute()"
)
v = None
if attr is not None:
if idx is not None:
if attr == "binflag":
v = self.output_binflag[idx]
elif attr == "fname":
v = self.output_fnames[idx]
elif attr == "unit":
v = self.output_units[idx]
return v
def add_external(self, fname, unit, binflag=False, output=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.external_fnames:
if self.verbose:
print(
"BaseModel.add_external() warning: "
f"replacing existing filename {fname}"
)
idx = self.external_fnames.index(fname)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
if unit in self.external_units:
if self.verbose:
msg = f"BaseModel.add_external() warning: replacing existing unit {unit}"
print(msg)
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
self.external_output.append(output)
return
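    # A hypothetical usage sketch (not part of the original source): register
    # an assumed external recharge array so it is written to the name file as
    # a DATA entry on unit 150:
    #
    # >>> m.add_external('rech_sp1.ref', 150)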
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
"""
plist = []
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
plist.append(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
plist.append(i)
else:
msg = " either fname or unit must be passed to remove_external()"
raise Exception(msg)
# remove external file
j = 0
for i in plist:
ipos = i - j
self.external_fnames.pop(ipos)
self.external_units.pop(ipos)
self.external_binflag.pop(ipos)
self.external_output.pop(ipos)
j += 1
return
def add_existing_package(
self, filename, ptype=None, copy_to_model_ws=True
):
"""
Add an existing package to a model instance.
Parameters
----------
filename : str
the name of the file to add as a package
ptype : optional
the model package type (e.g. "lpf", "wel", etc). If None,
then the file extension of the filename arg is used
copy_to_model_ws : bool
flag to copy the package file into the model_ws directory.
Returns
-------
None
"""
if ptype is None:
ptype = filename.split(".")[-1]
ptype = str(ptype).upper()
# for pak in self.packagelist:
# if ptype in pak.name:
# print("BaseModel.add_existing_package() warning: " +\
# "replacing existing package {0}".format(ptype))
class Obj:
pass
fake_package = Obj()
fake_package.write_file = lambda: None
fake_package.name = [ptype]
fake_package.extension = [filename.split(".")[-1]]
fake_package.unit_number = [self.next_ext_unit()]
if copy_to_model_ws:
base_filename = os.path.split(filename)[-1]
fake_package.file_name = [base_filename]
shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
else:
fake_package.file_name = [filename]
fake_package.allowDuplicates = True
self.add_package(fake_package)
def get_name_file_entries(self):
"""
Get a string representation of the name file.
Parameters
----------
"""
lines = []
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s = f"{p.name[i]:14s} {p.unit_number[i]:5d} {p.file_name[i]}"
lines.append(s)
return "\n".join(lines) + "\n"
def has_package(self, name):
"""
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
True if package name exists, otherwise False if not found.
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for p in self.packagelist:
for pn in p.name:
if pn.upper() == name:
return True
return False
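    # A hypothetical usage sketch (not part of the original source); the
    # lookup is case-insensitive:
    #
    # >>> m.has_package('dis')
    # True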
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for pp in self.packagelist:
if pp.name[0].upper() == name:
return pp
return None
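    # A hypothetical usage sketch (not part of the original source): fetch a
    # package object and read one of its arrays (assumes an LPF package):
    #
    # >>> lpf = m.get_package('LPF')
    # >>> hk = lpf.hk.array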
def set_version(self, version):
self.version = version.lower()
# check that this is a valid model version
if self.version not in list(self.version_types.keys()):
err = (
f"Error: Unsupported model version ({self.version}). "
"Valid model versions are:"
)
for v in list(self.version_types.keys()):
err += f" {v}"
raise Exception(err)
# set namefile heading
self.heading = (
f"# Name file for {self.version_types[self.version]}, "
f"generated by Flopy version {__version__}."
)
# set heading for each package
for p in self.get_package_list():
pak = self.get_package(p)
if hasattr(pak, "heading"):
pak._generate_heading()
return None
def change_model_ws(self, new_pth=None, reset_external=False):
"""
Change the model work space.
Parameters
----------
        new_pth : str
            Location of new model workspace. If this path does not exist,
            it will be created. (default is None, in which case the present
            working directory is used)
        Returns
        -------
        None
"""
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
print(f"\ncreating model workspace...\n {new_pth}")
os.makedirs(new_pth)
            except OSError:
                raise OSError(f"{new_pth} is not a valid workspace folder")
# line = '\n{} not valid, workspace-folder '.format(new_pth) + \
# 'was changed to {}\n'.format(os.getcwd())
# print(line)
# new_pth = os.getcwd()
# --reset the model workspace
old_pth = self._model_ws
self._model_ws = new_pth
if self.verbose:
print(f"\nchanging model workspace...\n {new_pth}")
# reset the paths for each package
for pp in self.packagelist:
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
# create the external path (if needed)
if (
hasattr(self, "external_path")
and self.external_path is not None
and not os.path.exists(
os.path.join(self._model_ws, self.external_path)
)
):
pth = os.path.join(self._model_ws, self.external_path)
os.makedirs(pth)
if reset_external:
self._reset_external(pth, old_pth)
elif reset_external:
self._reset_external(self._model_ws, old_pth)
return None
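    # A hypothetical usage sketch (not part of the original source): move the
    # model to a new (possibly not yet existing) workspace and rewrite the
    # input files there:
    #
    # >>> m.change_model_ws('./scenario_1')
    # >>> m.write_input()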
def _reset_external(self, pth, old_pth):
new_ext_fnames = []
for ext_file, output in zip(
self.external_fnames, self.external_output
):
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
# this is a wicked mess
if output:
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
new_ext_file = ext_file
else:
# fpth = os.path.abspath(os.path.join(old_pth, ext_file))
# new_ext_file = os.path.relpath(fpth, os.path.abspath(pth))
fdir = os.path.dirname(ext_file)
if fdir == "":
fpth = os.path.abspath(os.path.join(old_pth, ext_file))
else:
fpth = ext_file
ao = os.path.abspath(os.path.dirname(fpth))
ep = os.path.abspath(pth)
relp = os.path.relpath(ao, ep)
new_ext_file = os.path.join(relp, os.path.basename(ext_file))
new_ext_fnames.append(new_ext_file)
self.external_fnames = new_ext_fnames
@property
def model_ws(self):
return copy.deepcopy(self._model_ws)
def _set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = str(value)
self.namefile = self.__name + "." + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + "." + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
def __setattr__(self, key, value):
if key == "free_format_input":
# if self.bas6 is not None:
# self.bas6.ifrefm = value
super().__setattr__(key, value)
elif key == "name":
self._set_name(value)
elif key == "model_ws":
self.change_model_ws(value)
elif key == "sr" and value.__class__.__name__ == "SpatialReference":
warnings.warn(
"SpatialReference has been deprecated.",
category=DeprecationWarning,
)
if self.dis is not None:
self.dis.sr = value
else:
raise Exception(
"cannot set SpatialReference - ModflowDis not found"
)
elif key == "tr":
assert isinstance(
value, discretization.reference.TemporalReference
)
if self.dis is not None:
self.dis.tr = value
else:
raise Exception(
"cannot set TemporalReference - ModflowDis not found"
)
elif key == "start_datetime":
if self.dis is not None:
self.dis.start_datetime = value
self.tr.start_datetime = value
else:
raise Exception(
"cannot set start_datetime - ModflowDis not found"
)
else:
super().__setattr__(key, value)
def run_model(
self,
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
):
"""
This method will run the model using subprocess.Popen.
Parameters
----------
        silent : boolean
            If True, suppress model output to the screen. (default is False)
        pause : boolean, optional
            Pause upon completion (default is False).
        report : boolean, optional
            Save stdout lines to a list (buff) which is returned
            by the method. (default is False)
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
return run_model(
self.exe_name,
self.namefile,
model_ws=self.model_ws,
silent=silent,
pause=pause,
report=report,
normal_msg=normal_msg,
)
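    # A hypothetical usage sketch (not part of the original source), assuming
    # the executable named by `m.exe_name` is available on the PATH:
    #
    # >>> m.write_input()
    # >>> success, buff = m.run_model(silent=True, report=True)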
def load_results(self):
print("load_results not implemented")
return None
def write_input(self, SelPackList=False, check=False):
"""
Write the input.
Parameters
----------
SelPackList : False or list of packages
"""
if check:
# run check prior to writing input
self.check(f=f"{self.name}.chk", verbose=self.verbose, level=1)
# reset the model to free_format if parameter substitution was
# performed on a model load
if self.parameter_load and not self.free_format_input:
if self.verbose:
print(
"\nResetting free_format_input to True to "
"preserve the precision of the parameter data."
)
self.free_format_input = True
if self.verbose:
print("\nWriting packages:")
        if SelPackList is False:
for p in self.packagelist:
if self.verbose:
print(" Package: ", p.name[0])
# prevent individual package checks from running after
# model-level package check above
# otherwise checks are run twice
# or the model level check procedure would have to be split up
# or each package would need a check argument,
# or default for package level check would have to be False
try:
p.write_file(check=False)
except TypeError:
p.write_file()
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(" Package: ", p.name[0])
try:
p.write_file(check=False)
except TypeError:
p.write_file()
break
if self.verbose:
print(" ")
# write name file
self.write_name_file()
# os.chdir(org_dir)
return
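    # A hypothetical usage sketch (not part of the original source): write
    # only selected packages instead of the full package list:
    #
    # >>> m.write_input(SelPackList=['DIS', 'BAS6'])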
def write_name_file(self):
"""
Every Package needs its own writenamefile function
"""
raise Exception(
"IMPLEMENTATION ERROR: writenamefile must be overloaded"
)
def set_model_units(self):
"""
Every model needs its own set_model_units method
"""
raise Exception(
"IMPLEMENTATION ERROR: set_model_units must be overloaded"
)
@property
def name(self):
"""
Get model name
Returns
-------
name : str
name of model
"""
return copy.deepcopy(self.__name)
def add_pop_key_list(self, key):
"""
        Add an external file unit number to a list that will be used to remove
model output (typically binary) files from ext_unit_dict.
Parameters
----------
key : int
file unit number
Returns
-------
Examples
--------
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
chk = utils.check(self, f=f, verbose=verbose, level=level)
# check for unit number conflicts
package_units = {}
duplicate_units = {}
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] != 0:
if p.unit_number[i] in package_units.values():
duplicate_units[p.name[i]] = p.unit_number[i]
otherpackage = [
k
for k, v in package_units.items()
if v == p.unit_number[i]
][0]
duplicate_units[otherpackage] = p.unit_number[i]
if len(duplicate_units) > 0:
for k, v in duplicate_units.items():
chk._add_to_summary(
"Error", package=k, value=v, desc="unit number conflict"
)
else:
chk.passed.append("Unit number conflicts")
return self._check(chk, level)
def plot(self, SelPackList=None, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
model input data
Parameters
----------
SelPackList : bool or list
            List of packages to plot. If SelPackList=None all packages
are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
                MODFLOW zero-based layer number to return. If None, then
all layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return.
(default is zero)
key : str
MfList dictionary key. (default is None)
Returns
        -------
axes : list
Empty list is returned if filename_base is not None. Otherwise
            a list of matplotlib.pyplot.axis objects is returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_model_helper(
self, SelPackList=SelPackList, **kwargs
)
return axes
def to_shapefile(self, filename, package_names=None, **kwargs):
"""
Wrapper function for writing a shapefile for the model grid. If
package_names is not None, then search through the requested packages
looking for arrays that can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
        >>> m.to_shapefile('model.shp', package_names=['dis', 'lpf'])
"""
warnings.warn("to_shapefile() is deprecated. use .export()")
self.export(filename, package_names=package_names)
return
def run_model(
exe_name,
namefile,
model_ws="./",
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
use_async=False,
cargs=None,
):
"""
This function will run the model using subprocess.Popen. It
communicates with the model's stdout asynchronously and reports
progress to the screen with timestamps
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
Namefile of model to run. The namefile must be the
filename of the namefile without the path. Namefile can be None
to allow programs that do not require a control file (name file)
to be passed as a command line argument.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
    silent : boolean
        If True, suppress model output to the screen. (default is False)
pause : boolean, optional
Pause upon completion (default is False).
    report : boolean, optional
        Save stdout lines to a list (buff) which is returned
        by the method. (default is False)
normal_msg : str or list
Normal termination message used to determine if the
run terminated normally. More than one message can be provided using
a list. (Default is 'normal termination')
    use_async : boolean
        If True, read model stdout asynchronously and report progress with
        timestamps. Useful for models that take a long time to run; not
        recommended for models that run very quickly.
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
success = False
buff = []
# convert normal_msg to a list of lower case str for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg]
for idx, s in enumerate(normal_msg):
normal_msg[idx] = s.lower()
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
import platform
        if platform.system() == "Windows":
if not exe_name.lower().endswith(".exe"):
exe = which(exe_name + ".exe")
elif exe_name.lower().endswith(".exe"):
exe = which(exe_name[:-4])
if exe is None:
raise Exception(
f"The program {exe_name} does not exist or is not executable."
)
else:
if not silent:
print(
f"FloPy is using the following executable to run the model: {exe}"
)
if namefile is not None:
if not os.path.isfile(os.path.join(model_ws, namefile)):
raise Exception(
f"The namefile for this model does not exists: {namefile}"
)
# simple little function for the thread to target
def q_output(output, q):
for line in iter(output.readline, b""):
q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe_name]
if namefile is not None:
argv.append(namefile)
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
if not use_async:
while True:
line = proc.stdout.readline().decode("utf-8")
if line == "" and proc.poll() is not None:
break
if line:
for msg in normal_msg:
if msg in line.lower():
success = True
break
line = line.rstrip("\r\n")
if not silent:
print(line)
if report:
buff.append(line)
else:
break
return success, buff
# some tricks for the async stdout reading
q = Queue.Queue()
thread = threading.Thread(target=q_output, args=(proc.stdout, q))
thread.daemon = True
thread.start()
failed_words = ["fail", "error"]
last = datetime.now()
lastsec = 0.0
while True:
try:
line = q.get_nowait()
except Queue.Empty:
pass
else:
if line == "":
break
line = line.decode().lower().strip()
if line != "":
now = datetime.now()
dt = now - last
tsecs = dt.total_seconds() - lastsec
line = f"(elapsed:{tsecs})-->{line}"
lastsec = tsecs + lastsec
buff.append(line)
if not silent:
print(line)
for fword in failed_words:
if fword in line:
success = False
break
if proc.poll() is not None:
break
proc.wait()
thread.join(timeout=1)
    # decode the remaining raw bytes so buff stays a list of str
    buff.extend(line.decode() for line in proc.stdout.readlines())
proc.stdout.close()
for line in buff:
for msg in normal_msg:
if msg in line.lower():
print("success")
success = True
break
if pause:
input("Press Enter to continue...")
return success, buff
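# A hypothetical usage sketch (not part of the original source): with
# use_async=True each stdout line is queued by a background thread and
# echoed with an elapsed-time prefix such as '(elapsed:0.12)-->...'.
# MODFLOW 6 is assumed here, which needs no name file argument:
#
# >>> success, buff = run_model('mf6', None, model_ws='./sim',
# ...                           report=True, use_async=True)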
|
to_shapefile
|
Wrapper function for writing a shapefile for the model grid. If
package_names is not None, then search through the requested packages
looking for arrays that can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> m.to_shapefile('model.shp', package_names=['dis', 'lpf'])
|
"""
mbase module
This module contains the base model class from which
all other models inherit.
"""
import abc
import os
import shutil
import threading
import warnings
import queue as Queue
from datetime import datetime
from shutil import which
from subprocess import Popen, PIPE, STDOUT
import copy
import numpy as np
from flopy import utils, discretization
from .version import __version__
from .discretization.grid import Grid
## Global variables
# Multiplier for individual array elements in integer and real arrays read by
# MODFLOW's U2DREL, U1DREL and U2DINT.
iconst = 1
# Printout flag. If >= 0 then array values read are printed in listing file.
iprn = -1
# external exceptions for users
class PackageLoadException(Exception):
"""
FloPy package load exception.
"""
def __init__(self, error, location=""):
"""Initialize exception."""
self.message = error
super().__init__(f"{error} ({location})")
class FileDataEntry:
def __init__(self, fname, unit, binflag=False, output=False, package=None):
self.fname = fname
self.unit = unit
self.binflag = binflag
self.output = output
self.package = package
class FileData:
def __init__(self):
self.file_data = []
return
def add_file(self, fname, unit, binflag=False, output=False, package=None):
ipop = []
for idx, file_data in enumerate(self.file_data):
if file_data.fname == fname or file_data.unit == unit:
ipop.append(idx)
self.file_data.append(
FileDataEntry(
fname, unit, binflag=binflag, output=output, package=package
)
)
return
class ModelInterface:
def __init__(self):
self._mg_resync = True
self._modelgrid = None
def update_modelgrid(self):
if self._modelgrid is not None:
self._modelgrid = Grid(
proj4=self._modelgrid.proj4,
xoff=self._modelgrid.xoffset,
yoff=self._modelgrid.yoffset,
angrot=self._modelgrid.angrot,
)
self._mg_resync = True
@property
@abc.abstractmethod
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
@abc.abstractmethod
def packagelist(self):
raise NotImplementedError(
"must define packagelist in child class to use this base class"
)
@property
@abc.abstractmethod
def namefile(self):
raise NotImplementedError(
"must define namefile in child class to use this base class"
)
@property
@abc.abstractmethod
def model_ws(self):
raise NotImplementedError(
"must define model_ws in child class to use this base class"
)
@property
@abc.abstractmethod
def exename(self):
raise NotImplementedError(
"must define exename in child class to use this base class"
)
@property
@abc.abstractmethod
def version(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@property
@abc.abstractmethod
def solver_tols(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@abc.abstractmethod
def export(self, f, **kwargs):
raise NotImplementedError(
"must define export in child class to use this base class"
)
@property
@abc.abstractmethod
def laytyp(self):
raise NotImplementedError(
"must define laytyp in child class to use this base class"
)
@property
@abc.abstractmethod
def hdry(self):
raise NotImplementedError(
"must define hdry in child class to use this base class"
)
@property
@abc.abstractmethod
def hnoflo(self):
raise NotImplementedError(
"must define hnoflo in child class to use this base class"
)
@property
@abc.abstractmethod
def laycbd(self):
raise NotImplementedError(
"must define laycbd in child class to use this base class"
)
@property
@abc.abstractmethod
def verbose(self):
raise NotImplementedError(
"must define verbose in child class to use this base class"
)
@abc.abstractmethod
def check(self, f=None, verbose=True, level=1):
raise NotImplementedError(
"must define check in child class to use this base class"
)
def get_package_list(self, ftype=None):
"""
Get a list of all the package names.
Parameters
----------
ftype : str
Type of package, 'RIV', 'LPF', etc.
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
"""
val = []
for pp in self.packagelist:
if ftype is None:
val.append(pp.name[0].upper())
elif pp.package_type.lower() == ftype:
val.append(pp.name[0].upper())
return val
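    # A hypothetical usage sketch (not part of the original source): list the
    # package names of an assumed loaded model `m`:
    #
    # >>> m.get_package_list()
    # ['DIS', 'BAS6', 'LPF', 'OC', 'PCG']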
def _check(self, chk, level=1):
"""
Check model data for common errors.
Parameters
----------
        chk : check object
            check instance to which model- and package-level results are
            appended
        level : int
            Check method analysis level. If level=0, summary checks are
            performed. If level=1, full checks are performed.
        Returns
        -------
        chk : check object
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
results = {}
for p in self.packagelist:
if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
results[p.name[0]] = p.check(
f=None,
verbose=False,
level=level - 1,
checktype=chk.__class__,
)
# model level checks
# solver check
if self.version in chk.solver_packages.keys():
solvers = set(chk.solver_packages[self.version]).intersection(
set(self.get_package_list())
)
if not solvers:
chk._add_to_summary(
"Error", desc="\r No solver package", package="model"
)
elif len(list(solvers)) > 1:
for s in solvers:
chk._add_to_summary(
"Error",
desc="\r Multiple solver packages",
package=s,
)
else:
chk.passed.append("Compatible solver package")
# add package check results to model level check summary
for r in results.values():
if (
r is not None and r.summary_array is not None
): # currently SFR doesn't have one
chk.summary_array = np.append(
chk.summary_array, r.summary_array
).view(np.recarray)
chk.passed += [
f"{r.package.name[0]} package: {psd}" for psd in r.passed
]
chk.summarize()
return chk
class BaseModel(ModelInterface):
"""
MODFLOW-based models base class.
Parameters
----------
modelname : str, default "modflowtest"
Name of the model, which is also used for model file names.
namefile_ext : str, default "nam"
Name file extension, without "."
exe_name : str, default "mf2k.exe"
Name of the modflow executable.
model_ws : str, optional
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
structured : bool, default True
Specify if model grid is structured (default) or unstructured.
verbose : bool, default False
Print additional information to the screen.
**kwargs : dict, optional
Used to define: ``xll``/``yll`` for the x- and y-coordinates of
the lower-left corner of the grid, ``xul``/``yul`` for the
x- and y-coordinates of the upper-left corner of the grid
(deprecated), ``rotation`` for the grid rotation (default 0.0),
``proj4_str`` for a PROJ string, and ``start_datetime`` for
model start date (default "1-1-1970").
"""
def __init__(
self,
modelname="modflowtest",
namefile_ext="nam",
exe_name="mf2k.exe",
model_ws=None,
structured=True,
verbose=False,
**kwargs,
):
"""Initialize BaseModel."""
super().__init__()
self.__name = modelname
self.namefile_ext = namefile_ext or ""
self._namefile = self.__name + "." + self.namefile_ext
self._packagelist = []
self.heading = ""
self.exe_name = exe_name
self._verbose = verbose
self.external_path = None
self.external_extension = "ref"
if model_ws is None:
model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
            except OSError:
print(
f"\n{model_ws} not valid, "
f"workspace-folder was changed to {os.getcwd()}\n"
)
model_ws = os.getcwd()
self._model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ""
# check for reference info in kwargs
# we are just carrying these until a dis package is added
xll = kwargs.pop("xll", None)
yll = kwargs.pop("yll", None)
self._xul = kwargs.pop("xul", None)
self._yul = kwargs.pop("yul", None)
self._rotation = kwargs.pop("rotation", 0.0)
self._proj4_str = kwargs.pop("proj4_str", None)
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
# build model discretization objects
self._modelgrid = Grid(
proj4=self._proj4_str,
xoff=xll,
yoff=yll,
angrot=self._rotation,
)
self._modeltime = None
# Model file information
self.__onunit__ = 10
# external option stuff
self.array_free_format = True
self.free_format_input = True
self.parameter_load = False
self.array_format = None
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external_output = []
self.package_units = []
self._next_ext_unit = None
# output files
self.output_fnames = []
self.output_units = []
self.output_binflag = []
self.output_packages = []
return
@property
def modeltime(self):
raise NotImplementedError(
"must define modeltime in child class to use this base class"
)
@property
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
def packagelist(self):
return self._packagelist
@packagelist.setter
def packagelist(self, packagelist):
self._packagelist = packagelist
@property
def namefile(self):
return self._namefile
@namefile.setter
def namefile(self, namefile):
self._namefile = namefile
@property
def model_ws(self):
return self._model_ws
@model_ws.setter
def model_ws(self, model_ws):
self._model_ws = model_ws
@property
def exename(self):
return self._exename
@exename.setter
def exename(self, exename):
self._exename = exename
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, verbose):
self._verbose = verbose
@property
def laytyp(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").laytyp.array
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").laycon.array
if self.get_package("UPW") is not None:
return self.get_package("UPW").laytyp.array
return None
@property
def hdry(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").hdry
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").hdry
if self.get_package("UPW") is not None:
return self.get_package("UPW").hdry
return None
@property
def hnoflo(self):
try:
bas6 = self.get_package("BAS6")
return bas6.hnoflo
except AttributeError:
return None
@property
def laycbd(self):
try:
dis = self.get_package("DIS")
return dis.laycbd.array
except AttributeError:
return None
# we don't need these - no need for controlled access to array_free_format
# def set_free_format(self, value=True):
# """
# Set the free format flag for the model instance
#
# Parameters
# ----------
# value : bool
# Boolean value to set free format flag for model. (default is True)
#
# Returns
# -------
#
# """
# if not isinstance(value, bool):
# print('Error: set_free_format passed value must be a boolean')
# return False
# self.array_free_format = value
#
# def get_free_format(self):
# """
# Return the free format flag for the model
#
# Returns
# -------
# out : bool
# Free format flag for the model
#
# """
# return self.array_free_format
def next_unit(self, i=None):
if i is not None:
self.__onunit__ = i - 1
else:
self.__onunit__ += 1
return self.__onunit__
def next_ext_unit(self):
"""
Function to encapsulate next_ext_unit attribute
"""
next_unit = self._next_ext_unit + 1
self._next_ext_unit += 1
return next_unit
def export(self, f, **kwargs):
"""
Method to export a model to netcdf or shapefile based on the
extension of the file name (.shp for shapefile, .nc for netcdf)
Parameters
----------
f : str
filename
kwargs : keyword arguments
modelgrid : flopy.discretization.Grid instance
user supplied modelgrid which can be used for exporting
in lieu of the modelgrid associated with the model object
Returns
-------
None or Netcdf object
"""
from .export import utils
return utils.model_export(f, self, **kwargs)
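    # A hypothetical usage sketch (not part of the original source): a user
    # supplied modelgrid can override the grid attached to the model:
    #
    # >>> m.export('model.shp', modelgrid=my_grid)  # my_grid is assumed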
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
"""
for idx, u in enumerate(p.unit_number):
if u != 0:
if u in self.package_units or u in self.external_units:
try:
pn = p.name[idx]
                    except (IndexError, TypeError):
pn = p.name
if self.verbose:
print(
f"\nWARNING:\n unit {u} of package {pn} already in use."
)
self.package_units.append(u)
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif isinstance(p, type(pp)):
if self.verbose:
print(
"\nWARNING:\n Two packages of the same type, "
f"Replacing existing '{p.name[0]}' package."
)
self.packagelist[i] = p
return
if self.verbose:
print("adding Package: ", p.name[0])
self.packagelist.append(p)
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
"""
for i, pp in enumerate(self.packagelist):
if pname.upper() in pp.name:
if self.verbose:
print("removing Package: ", pp.name)
# Remove the package object from the model's packagelist
p = self.packagelist.pop(i)
# Remove the package unit number from the list of package
# units stored with the model
for iu in p.unit_number:
if iu in self.package_units:
self.package_units.remove(iu)
return
        raise ValueError(
            "Package name " + pname + " not found in Package list"
        )
def __getattr__(self, item):
"""
__getattr__ - syntactic sugar
Parameters
----------
item : str
3 character package name (case insensitive) or "sr" to access
the SpatialReference instance of the ModflowDis object
Returns
-------
sr : SpatialReference instance
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
Note
----
if self.dis is not None, then the spatial reference instance is updated
using self.dis.delr, self.dis.delc, and self.dis.lenuni before being
returned
"""
if item == "output_packages" or not hasattr(self, "output_packages"):
raise AttributeError(item)
if item == "tr":
if self.dis is not None:
return self.dis.tr
else:
return None
if item == "nper":
if self.dis is not None:
return self.dis.nper
else:
return 0
if item == "start_datetime":
if self.dis is not None:
return self.dis.start_datetime
else:
return None
# return self.get_package(item)
# to avoid infinite recursion
if item == "_packagelist" or item == "packagelist":
raise AttributeError(item)
pckg = self.get_package(item)
if pckg is not None or item in self.mfnam_packages:
return pckg
if item == "modelgrid":
return
raise AttributeError(item)
def get_ext_dict_attr(
self, ext_unit_dict=None, unit=None, filetype=None, pop_key=True
):
iu = None
fname = None
if ext_unit_dict is not None:
for key, value in ext_unit_dict.items():
if key == unit:
iu = key
fname = os.path.basename(value.filename)
break
elif value.filetype == filetype:
iu = key
fname = os.path.basename(value.filename)
if pop_key:
self.add_pop_key_list(iu)
break
return iu, fname
def _output_msg(self, i, add=True):
if add:
txt1 = "Adding"
txt2 = "to"
else:
txt1 = "Removing"
txt2 = "from"
print(
f"{txt1} {self.output_fnames[i]} (unit={self.output_units[i]}) "
f"{txt2} the output list."
)
def add_output_file(
self, unit, fname=None, extension="cbc", binflag=True, package=None
):
"""
Add an ascii or binary output file for a package
Parameters
----------
unit : int
unit number of external array
fname : str
filename of external array. (default is None)
extension : str
extension to use for the cell-by-cell file. Only used if fname
is None. (default is cbc)
binflag : bool
boolean flag indicating if the output file is a binary file.
Default is True
package : str
string that defines the package the output file is attached to.
Default is None
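        Examples
        --------
        A minimal sketch (assumes a model instance m; unit number is
        illustrative):
        >>> m.add_output_file(53, extension='cbc', binflag=True)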
"""
add_cbc = False
if unit > 0:
add_cbc = True
# determine if the file is in external_units
if abs(unit) in self.external_units:
idx = self.external_units.index(abs(unit))
if fname is None:
fname = os.path.basename(self.external_fnames[idx])
binflag = self.external_binflag[idx]
self.remove_external(unit=abs(unit))
# determine if the unit exists in the output data
if abs(unit) in self.output_units:
add_cbc = False
idx = self.output_units.index(abs(unit))
# determine if binflag has changed
if binflag is not self.output_binflag[idx]:
add_cbc = True
if add_cbc:
self.remove_output(unit=abs(unit))
else:
if package is not None:
self.output_packages[idx].append(package)
if add_cbc:
if fname is None:
fname = f"{self.name}.{extension}"
# check if this file name exists for a different unit number
if fname in self.output_fnames:
idx = self.output_fnames.index(fname)
iut = self.output_units[idx]
if iut != unit:
# include unit number in fname if package has
# not been passed
if package is None:
fname = f"{self.name}.{unit}.{extension}"
# include package name in fname
else:
fname = f"{self.name}.{package}.{extension}"
else:
fname = os.path.basename(fname)
self.add_output(fname, unit, binflag=binflag, package=package)
return
def add_output(self, fname, unit, binflag=False, package=None):
"""
        Add an output file so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
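        Examples
        --------
        A minimal sketch (assumes a model instance m; name and unit are
        illustrative):
        >>> m.add_output('model.cbc', 53, binflag=True)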
"""
if fname in self.output_fnames:
if self.verbose:
print(
"BaseModel.add_output() warning: "
f"replacing existing filename {fname}"
)
idx = self.output_fnames.index(fname)
if self.verbose:
self._output_msg(idx, add=False)
self.output_fnames.pop(idx)
self.output_units.pop(idx)
self.output_binflag.pop(idx)
self.output_packages.pop(idx)
self.output_fnames.append(fname)
self.output_units.append(unit)
self.output_binflag.append(binflag)
if package is not None:
self.output_packages.append([package])
else:
self.output_packages.append([])
if self.verbose:
self._output_msg(-1, add=True)
return
def remove_output(self, fname=None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
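        Examples
        --------
        A minimal sketch; either keyword selects the entry to remove:
        >>> m.remove_output(fname='model.cbc')
        >>> m.remove_output(unit=53)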
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
else:
msg = " either fname or unit must be passed to remove_output()"
raise Exception(msg)
return
def get_output(self, fname=None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
            unit number of output array
        Returns
        -------
        int, str, or None
            Unit number if fname is given, file name if unit is given,
            or None if no match is found.
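        Examples
        --------
        A minimal sketch (assumes a model instance m; names and numbers
        are illustrative):
        >>> m.get_output(fname='model.cbc')
        >>> m.get_output(unit=53)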
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
return self.output_units[i]
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
return self.output_fnames[i]
return None
else:
msg = " either fname or unit must be passed to get_output()"
raise Exception(msg)
return
def set_output_attribute(self, fname=None, unit=None, attr=None):
"""
Set a variable in an output file from the model by specifying either
the file name or the unit number and a dictionary with attributes
to change.
Parameters
----------
        fname : str
            filename of output array
        unit : int
            unit number of output array
        attr : dict
            dictionary of attributes to change, keyed by "binflag",
            "fname", or "unit"
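        Examples
        --------
        A minimal sketch (assumes a model instance m with unit 53 in its
        output list; numbers are illustrative):
        >>> m.set_output_attribute(unit=53, attr={'binflag': False})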
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
msg = (
" either fname or unit must be passed "
"to set_output_attribute()"
)
raise Exception(msg)
if attr is not None:
if idx is not None:
                for key, value in attr.items():
if key == "binflag":
self.output_binflag[idx] = value
elif key == "fname":
self.output_fnames[idx] = value
elif key == "unit":
self.output_units[idx] = value
return
def get_output_attribute(self, fname=None, unit=None, attr=None):
"""
        Get an attribute for an output file from the model by specifying either
the file name or the unit number.
Parameters
----------
        fname : str
            filename of output array
        unit : int
            unit number of output array
        attr : str
            name of the attribute to return: "binflag", "fname", or "unit"
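        Examples
        --------
        A minimal sketch (assumes a model instance m with unit 53 in its
        output list; numbers are illustrative):
        >>> m.get_output_attribute(unit=53, attr='fname')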
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
" either fname or unit must be passed "
"to set_output_attribute()"
)
v = None
if attr is not None:
if idx is not None:
if attr == "binflag":
v = self.output_binflag[idx]
elif attr == "fname":
v = self.output_fnames[idx]
elif attr == "unit":
v = self.output_units[idx]
return v
def add_external(self, fname, unit, binflag=False, output=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
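        Examples
        --------
        A minimal sketch (assumes a model instance m; name and unit are
        illustrative):
        >>> m.add_external('recharge.ref', 151)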
"""
if fname in self.external_fnames:
if self.verbose:
print(
"BaseModel.add_external() warning: "
f"replacing existing filename {fname}"
)
idx = self.external_fnames.index(fname)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
if unit in self.external_units:
if self.verbose:
msg = f"BaseModel.add_external() warning: replacing existing unit {unit}"
print(msg)
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
self.external_output.append(output)
return
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
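        Examples
        --------
        A minimal sketch; either keyword selects the entries to remove:
        >>> m.remove_external(fname='recharge.ref')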
"""
plist = []
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
plist.append(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
plist.append(i)
else:
msg = " either fname or unit must be passed to remove_external()"
raise Exception(msg)
# remove external file
j = 0
for i in plist:
ipos = i - j
self.external_fnames.pop(ipos)
self.external_units.pop(ipos)
self.external_binflag.pop(ipos)
self.external_output.pop(ipos)
j += 1
return
def add_existing_package(
self, filename, ptype=None, copy_to_model_ws=True
):
"""
Add an existing package to a model instance.
Parameters
----------
filename : str
the name of the file to add as a package
ptype : optional
the model package type (e.g. "lpf", "wel", etc). If None,
then the file extension of the filename arg is used
copy_to_model_ws : bool
flag to copy the package file into the model_ws directory.
Returns
-------
None
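        Examples
        --------
        A minimal sketch (assumes a model instance m; file name is
        illustrative):
        >>> m.add_existing_package('model.lpf', ptype='lpf')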
"""
if ptype is None:
ptype = filename.split(".")[-1]
ptype = str(ptype).upper()
# for pak in self.packagelist:
# if ptype in pak.name:
# print("BaseModel.add_existing_package() warning: " +\
# "replacing existing package {0}".format(ptype))
class Obj:
pass
fake_package = Obj()
fake_package.write_file = lambda: None
fake_package.name = [ptype]
fake_package.extension = [filename.split(".")[-1]]
fake_package.unit_number = [self.next_ext_unit()]
if copy_to_model_ws:
base_filename = os.path.split(filename)[-1]
fake_package.file_name = [base_filename]
shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
else:
fake_package.file_name = [filename]
fake_package.allowDuplicates = True
self.add_package(fake_package)
def get_name_file_entries(self):
"""
Get a string representation of the name file.
        Returns
        -------
        str
            String representation of the name file.
"""
lines = []
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s = f"{p.name[i]:14s} {p.unit_number[i]:5d} {p.file_name[i]}"
lines.append(s)
return "\n".join(lines) + "\n"
def has_package(self, name):
"""
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
True if package name exists, otherwise False if not found.
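        Examples
        --------
        A minimal sketch (assumes a DIS package has been added):
        >>> m.has_package('dis')  # case-insensitive
        True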
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for p in self.packagelist:
for pn in p.name:
if pn.upper() == name:
return True
return False
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
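        Examples
        --------
        A minimal sketch (assumes an LPF package has been added):
        >>> lpf = m.get_package('LPF')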
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for pp in self.packagelist:
if pp.name[0].upper() == name:
return pp
return None
def set_version(self, version):
self.version = version.lower()
# check that this is a valid model version
if self.version not in list(self.version_types.keys()):
err = (
f"Error: Unsupported model version ({self.version}). "
"Valid model versions are:"
)
for v in list(self.version_types.keys()):
err += f" {v}"
raise Exception(err)
# set namefile heading
self.heading = (
f"# Name file for {self.version_types[self.version]}, "
f"generated by Flopy version {__version__}."
)
# set heading for each package
for p in self.get_package_list():
pak = self.get_package(p)
if hasattr(pak, "heading"):
pak._generate_heading()
return None
def change_model_ws(self, new_pth=None, reset_external=False):
"""
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
the present working directory).
Returns
-------
        None
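        Examples
        --------
        A minimal sketch (path is illustrative):
        >>> m.change_model_ws('new_workspace')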
"""
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
print(f"\ncreating model workspace...\n {new_pth}")
os.makedirs(new_pth)
            except OSError:
                raise OSError(f"{new_pth} is not a valid workspace folder")
# line = '\n{} not valid, workspace-folder '.format(new_pth) + \
# 'was changed to {}\n'.format(os.getcwd())
# print(line)
# new_pth = os.getcwd()
# --reset the model workspace
old_pth = self._model_ws
self._model_ws = new_pth
if self.verbose:
print(f"\nchanging model workspace...\n {new_pth}")
# reset the paths for each package
for pp in self.packagelist:
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
# create the external path (if needed)
if (
hasattr(self, "external_path")
and self.external_path is not None
and not os.path.exists(
os.path.join(self._model_ws, self.external_path)
)
):
pth = os.path.join(self._model_ws, self.external_path)
os.makedirs(pth)
if reset_external:
self._reset_external(pth, old_pth)
elif reset_external:
self._reset_external(self._model_ws, old_pth)
return None
def _reset_external(self, pth, old_pth):
new_ext_fnames = []
for ext_file, output in zip(
self.external_fnames, self.external_output
):
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
# this is a wicked mess
if output:
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
new_ext_file = ext_file
else:
# fpth = os.path.abspath(os.path.join(old_pth, ext_file))
# new_ext_file = os.path.relpath(fpth, os.path.abspath(pth))
fdir = os.path.dirname(ext_file)
if fdir == "":
fpth = os.path.abspath(os.path.join(old_pth, ext_file))
else:
fpth = ext_file
ao = os.path.abspath(os.path.dirname(fpth))
ep = os.path.abspath(pth)
relp = os.path.relpath(ao, ep)
new_ext_file = os.path.join(relp, os.path.basename(ext_file))
new_ext_fnames.append(new_ext_file)
self.external_fnames = new_ext_fnames
@property
def model_ws(self):
return copy.deepcopy(self._model_ws)
def _set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = str(value)
self.namefile = self.__name + "." + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + "." + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
def __setattr__(self, key, value):
if key == "free_format_input":
# if self.bas6 is not None:
# self.bas6.ifrefm = value
super().__setattr__(key, value)
elif key == "name":
self._set_name(value)
elif key == "model_ws":
self.change_model_ws(value)
elif key == "sr" and value.__class__.__name__ == "SpatialReference":
warnings.warn(
"SpatialReference has been deprecated.",
category=DeprecationWarning,
)
if self.dis is not None:
self.dis.sr = value
else:
raise Exception(
"cannot set SpatialReference - ModflowDis not found"
)
elif key == "tr":
assert isinstance(
value, discretization.reference.TemporalReference
)
if self.dis is not None:
self.dis.tr = value
else:
raise Exception(
"cannot set TemporalReference - ModflowDis not found"
)
elif key == "start_datetime":
if self.dis is not None:
self.dis.start_datetime = value
self.tr.start_datetime = value
else:
raise Exception(
"cannot set start_datetime - ModflowDis not found"
)
else:
super().__setattr__(key, value)
def run_model(
self,
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
):
"""
This method will run the model using subprocess.Popen.
Parameters
----------
        silent : boolean
            Suppress writing of run information to the screen
            (default is False).
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
            by the method. (default is False).
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
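        Examples
        --------
        A minimal sketch (assumes the executable named in exe_name is
        available on the system path):
        >>> success, buff = m.run_model(silent=True, report=True)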
"""
return run_model(
self.exe_name,
self.namefile,
model_ws=self.model_ws,
silent=silent,
pause=pause,
report=report,
normal_msg=normal_msg,
)
def load_results(self):
print("load_results not implemented")
return None
def write_input(self, SelPackList=False, check=False):
"""
Write the input.
Parameters
----------
        SelPackList : False or list of package names
            If False (default), all packages are written. Otherwise only
            the packages whose names appear in the list are written.
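        Examples
        --------
        A minimal sketch (package names are illustrative):
        >>> m.write_input()
        >>> m.write_input(SelPackList=['DIS', 'BAS6'])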
"""
if check:
# run check prior to writing input
self.check(f=f"{self.name}.chk", verbose=self.verbose, level=1)
# reset the model to free_format if parameter substitution was
# performed on a model load
if self.parameter_load and not self.free_format_input:
if self.verbose:
print(
"\nResetting free_format_input to True to "
"preserve the precision of the parameter data."
)
self.free_format_input = True
if self.verbose:
print("\nWriting packages:")
if SelPackList == False:
for p in self.packagelist:
if self.verbose:
print(" Package: ", p.name[0])
# prevent individual package checks from running after
# model-level package check above
# otherwise checks are run twice
# or the model level check procedure would have to be split up
# or each package would need a check argument,
# or default for package level check would have to be False
try:
p.write_file(check=False)
except TypeError:
p.write_file()
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(" Package: ", p.name[0])
try:
p.write_file(check=False)
except TypeError:
p.write_file()
break
if self.verbose:
print(" ")
# write name file
self.write_name_file()
# os.chdir(org_dir)
return
def write_name_file(self):
"""
        Every model needs its own write_name_file method.
        """
        raise Exception(
            "IMPLEMENTATION ERROR: write_name_file must be overloaded"
)
def set_model_units(self):
"""
Every model needs its own set_model_units method
"""
raise Exception(
"IMPLEMENTATION ERROR: set_model_units must be overloaded"
)
@property
def name(self):
"""
Get model name
Returns
-------
name : str
name of model
"""
return copy.deepcopy(self.__name)
def add_pop_key_list(self, key):
"""
        Add an external file unit number to a list that will be used to remove
model output (typically binary) files from ext_unit_dict.
Parameters
----------
key : int
file unit number
Returns
-------
Examples
--------
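        A minimal sketch (assumes a model instance m; unit number is
        illustrative):
        >>> m.add_pop_key_list(53)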
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
chk = utils.check(self, f=f, verbose=verbose, level=level)
# check for unit number conflicts
package_units = {}
duplicate_units = {}
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] != 0:
if p.unit_number[i] in package_units.values():
duplicate_units[p.name[i]] = p.unit_number[i]
otherpackage = [
k
for k, v in package_units.items()
if v == p.unit_number[i]
][0]
duplicate_units[otherpackage] = p.unit_number[i]
if len(duplicate_units) > 0:
for k, v in duplicate_units.items():
chk._add_to_summary(
"Error", package=k, value=v, desc="unit number conflict"
)
else:
chk.passed.append("Unit number conflicts")
return self._check(chk, level)
def plot(self, SelPackList=None, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
model input data
Parameters
----------
        SelPackList : None or list
            List of packages to plot. If SelPackList=None, all packages
            are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
            MODFLOW zero-based layer number to return. If None, then
            all layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return.
(default is zero)
key : str
MfList dictionary key. (default is None)
Returns
----------
axes : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis are returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_model_helper(
self, SelPackList=SelPackList, **kwargs
)
return axes
# MASKED: to_shapefile function (lines 1594-1620)
def run_model(
exe_name,
namefile,
model_ws="./",
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
use_async=False,
cargs=None,
):
"""
This function will run the model using subprocess.Popen. It
communicates with the model's stdout asynchronously and reports
progress to the screen with timestamps
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
        Namefile of model to run. The namefile must be the filename of
        the namefile without the path. namefile can be None for programs
        that do not take a control (name) file as a command-line
        argument.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
    silent : boolean
        Suppress writing of run information to the screen
        (default is False).
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
        by the method. (default is False).
normal_msg : str or list
Normal termination message used to determine if the
run terminated normally. More than one message can be provided using
a list. (Default is 'normal termination')
    use_async : boolean
        Asynchronously read model stdout and report with timestamps;
        useful for models that take a long time to run, but not
        recommended for models that run very quickly.
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
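    Examples
    --------
    A minimal sketch (executable and name file are illustrative):
    >>> from flopy.mbase import run_model
    >>> success, buff = run_model('mf2005', 'model.nam', model_ws='.')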
"""
success = False
buff = []
# convert normal_msg to a list of lower case str for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg]
for idx, s in enumerate(normal_msg):
normal_msg[idx] = s.lower()
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
import platform
        if platform.system() == "Windows":
if not exe_name.lower().endswith(".exe"):
exe = which(exe_name + ".exe")
elif exe_name.lower().endswith(".exe"):
exe = which(exe_name[:-4])
if exe is None:
raise Exception(
f"The program {exe_name} does not exist or is not executable."
)
else:
if not silent:
print(
f"FloPy is using the following executable to run the model: {exe}"
)
if namefile is not None:
if not os.path.isfile(os.path.join(model_ws, namefile)):
raise Exception(
f"The namefile for this model does not exists: {namefile}"
)
# simple little function for the thread to target
def q_output(output, q):
for line in iter(output.readline, b""):
q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe_name]
if namefile is not None:
argv.append(namefile)
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
if not use_async:
while True:
line = proc.stdout.readline().decode("utf-8")
if line == "" and proc.poll() is not None:
break
if line:
for msg in normal_msg:
if msg in line.lower():
success = True
break
line = line.rstrip("\r\n")
if not silent:
print(line)
if report:
buff.append(line)
else:
break
return success, buff
# some tricks for the async stdout reading
q = Queue.Queue()
thread = threading.Thread(target=q_output, args=(proc.stdout, q))
thread.daemon = True
thread.start()
failed_words = ["fail", "error"]
last = datetime.now()
lastsec = 0.0
while True:
try:
line = q.get_nowait()
except Queue.Empty:
pass
else:
if line == "":
break
line = line.decode().lower().strip()
if line != "":
now = datetime.now()
dt = now - last
tsecs = dt.total_seconds() - lastsec
line = f"(elapsed:{tsecs})-->{line}"
lastsec = tsecs + lastsec
buff.append(line)
if not silent:
print(line)
for fword in failed_words:
if fword in line:
success = False
break
if proc.poll() is not None:
break
    proc.wait()
    thread.join(timeout=1)
    # capture any remaining stdout, decoding bytes to str for the
    # normal-termination scan below
    buff.extend(line.decode() for line in proc.stdout.readlines())
    proc.stdout.close()
for line in buff:
for msg in normal_msg:
if msg in line.lower():
print("success")
success = True
break
if pause:
input("Press Enter to continue...")
return success, buff
|
def to_shapefile(self, filename, package_names=None, **kwargs):
"""
Wrapper function for writing a shapefile for the model grid. If
package_names is not None, then search through the requested packages
looking for arrays that can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
        >>> m.to_shapefile('model.shp')
"""
        warnings.warn(
            "to_shapefile() is deprecated. use .export()",
            category=DeprecationWarning,
        )
self.export(filename, package_names=package_names)
return
| 1,594
| 1,620
|
"""
mbase module
This module contains the base model class from which
all of the other models inherit.
"""
import abc
import os
import shutil
import threading
import warnings
import queue as Queue
from datetime import datetime
from shutil import which
from subprocess import Popen, PIPE, STDOUT
import copy
import numpy as np
from flopy import utils, discretization
from .version import __version__
from .discretization.grid import Grid
## Global variables
# Multiplier for individual array elements in integer and real arrays read by
# MODFLOW's U2DREL, U1DREL and U2DINT.
iconst = 1
# Printout flag. If >= 0 then array values read are printed in listing file.
iprn = -1
# external exceptions for users
class PackageLoadException(Exception):
"""
FloPy package load exception.
"""
def __init__(self, error, location=""):
"""Initialize exception."""
self.message = error
super().__init__(f"{error} ({location})")
class FileDataEntry:
def __init__(self, fname, unit, binflag=False, output=False, package=None):
self.fname = fname
self.unit = unit
self.binflag = binflag
self.output = output
self.package = package
class FileData:
def __init__(self):
self.file_data = []
return
def add_file(self, fname, unit, binflag=False, output=False, package=None):
        # drop any existing entries that share the file name or unit number
        ipop = []
        for idx, file_data in enumerate(self.file_data):
            if file_data.fname == fname or file_data.unit == unit:
                ipop.append(idx)
        for idx in reversed(ipop):
            self.file_data.pop(idx)
self.file_data.append(
FileDataEntry(
fname, unit, binflag=binflag, output=output, package=package
)
)
return
class ModelInterface:
def __init__(self):
self._mg_resync = True
self._modelgrid = None
def update_modelgrid(self):
if self._modelgrid is not None:
self._modelgrid = Grid(
proj4=self._modelgrid.proj4,
xoff=self._modelgrid.xoffset,
yoff=self._modelgrid.yoffset,
angrot=self._modelgrid.angrot,
)
self._mg_resync = True
@property
@abc.abstractmethod
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
@abc.abstractmethod
def packagelist(self):
raise NotImplementedError(
"must define packagelist in child class to use this base class"
)
@property
@abc.abstractmethod
def namefile(self):
raise NotImplementedError(
"must define namefile in child class to use this base class"
)
@property
@abc.abstractmethod
def model_ws(self):
raise NotImplementedError(
"must define model_ws in child class to use this base class"
)
@property
@abc.abstractmethod
def exename(self):
raise NotImplementedError(
"must define exename in child class to use this base class"
)
@property
@abc.abstractmethod
def version(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@property
@abc.abstractmethod
def solver_tols(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@abc.abstractmethod
def export(self, f, **kwargs):
raise NotImplementedError(
"must define export in child class to use this base class"
)
@property
@abc.abstractmethod
def laytyp(self):
raise NotImplementedError(
"must define laytyp in child class to use this base class"
)
@property
@abc.abstractmethod
def hdry(self):
raise NotImplementedError(
"must define hdry in child class to use this base class"
)
@property
@abc.abstractmethod
def hnoflo(self):
raise NotImplementedError(
"must define hnoflo in child class to use this base class"
)
@property
@abc.abstractmethod
def laycbd(self):
raise NotImplementedError(
"must define laycbd in child class to use this base class"
)
@property
@abc.abstractmethod
def verbose(self):
raise NotImplementedError(
"must define verbose in child class to use this base class"
)
@abc.abstractmethod
def check(self, f=None, verbose=True, level=1):
raise NotImplementedError(
"must define check in child class to use this base class"
)
def get_package_list(self, ftype=None):
"""
Get a list of all the package names.
Parameters
----------
ftype : str
Type of package, 'RIV', 'LPF', etc.
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
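        Examples
        --------
        A minimal sketch (assumes a model instance m):
        >>> m.get_package_list()  # e.g. ['DIS', 'BAS6', 'LPF']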
"""
val = []
for pp in self.packagelist:
if ftype is None:
val.append(pp.name[0].upper())
            elif pp.package_type.lower() == ftype.lower():
val.append(pp.name[0].upper())
return val
def _check(self, chk, level=1):
"""
Check model data for common errors.
Parameters
----------
        chk : check object
            Check instance to which model- and package-level results
            are appended.
        level : int
            Check method analysis level. If level=0, summary checks are
            performed. If level=1, full checks are performed.
        Returns
        -------
        chk : check object
            The updated check instance.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
results = {}
for p in self.packagelist:
if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
results[p.name[0]] = p.check(
f=None,
verbose=False,
level=level - 1,
checktype=chk.__class__,
)
# model level checks
# solver check
if self.version in chk.solver_packages.keys():
solvers = set(chk.solver_packages[self.version]).intersection(
set(self.get_package_list())
)
if not solvers:
chk._add_to_summary(
"Error", desc="\r No solver package", package="model"
)
elif len(list(solvers)) > 1:
for s in solvers:
chk._add_to_summary(
"Error",
desc="\r Multiple solver packages",
package=s,
)
else:
chk.passed.append("Compatible solver package")
# add package check results to model level check summary
for r in results.values():
if (
r is not None and r.summary_array is not None
): # currently SFR doesn't have one
chk.summary_array = np.append(
chk.summary_array, r.summary_array
).view(np.recarray)
chk.passed += [
f"{r.package.name[0]} package: {psd}" for psd in r.passed
]
chk.summarize()
return chk
class BaseModel(ModelInterface):
"""
MODFLOW-based models base class.
Parameters
----------
modelname : str, default "modflowtest"
Name of the model, which is also used for model file names.
namefile_ext : str, default "nam"
Name file extension, without "."
exe_name : str, default "mf2k.exe"
Name of the modflow executable.
model_ws : str, optional
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
structured : bool, default True
Specify if model grid is structured (default) or unstructured.
verbose : bool, default False
Print additional information to the screen.
**kwargs : dict, optional
Used to define: ``xll``/``yll`` for the x- and y-coordinates of
the lower-left corner of the grid, ``xul``/``yul`` for the
x- and y-coordinates of the upper-left corner of the grid
(deprecated), ``rotation`` for the grid rotation (default 0.0),
``proj4_str`` for a PROJ string, and ``start_datetime`` for
model start date (default "1-1-1970").
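    Examples
    --------
    BaseModel is not instantiated directly; a minimal sketch using a
    subclass (names are illustrative):
    >>> import flopy
    >>> m = flopy.modflow.Modflow(modelname='demo', model_ws='.')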
"""
def __init__(
self,
modelname="modflowtest",
namefile_ext="nam",
exe_name="mf2k.exe",
model_ws=None,
structured=True,
verbose=False,
**kwargs,
):
"""Initialize BaseModel."""
super().__init__()
self.__name = modelname
self.namefile_ext = namefile_ext or ""
self._namefile = self.__name + "." + self.namefile_ext
self._packagelist = []
self.heading = ""
self.exe_name = exe_name
self._verbose = verbose
self.external_path = None
self.external_extension = "ref"
if model_ws is None:
model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
            except OSError:
print(
f"\n{model_ws} not valid, "
f"workspace-folder was changed to {os.getcwd()}\n"
)
model_ws = os.getcwd()
self._model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ""
# check for reference info in kwargs
# we are just carrying these until a dis package is added
xll = kwargs.pop("xll", None)
yll = kwargs.pop("yll", None)
self._xul = kwargs.pop("xul", None)
self._yul = kwargs.pop("yul", None)
self._rotation = kwargs.pop("rotation", 0.0)
self._proj4_str = kwargs.pop("proj4_str", None)
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
# build model discretization objects
self._modelgrid = Grid(
proj4=self._proj4_str,
xoff=xll,
yoff=yll,
angrot=self._rotation,
)
self._modeltime = None
# Model file information
self.__onunit__ = 10
# external option stuff
self.array_free_format = True
self.free_format_input = True
self.parameter_load = False
self.array_format = None
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external_output = []
self.package_units = []
self._next_ext_unit = None
# output files
self.output_fnames = []
self.output_units = []
self.output_binflag = []
self.output_packages = []
return
@property
def modeltime(self):
raise NotImplementedError(
"must define modeltime in child class to use this base class"
)
@property
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
def packagelist(self):
return self._packagelist
@packagelist.setter
def packagelist(self, packagelist):
self._packagelist = packagelist
@property
def namefile(self):
return self._namefile
@namefile.setter
def namefile(self, namefile):
self._namefile = namefile
@property
def model_ws(self):
return self._model_ws
@model_ws.setter
def model_ws(self, model_ws):
self._model_ws = model_ws
@property
def exename(self):
return self._exename
@exename.setter
def exename(self, exename):
self._exename = exename
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, verbose):
self._verbose = verbose
@property
def laytyp(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").laytyp.array
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").laycon.array
if self.get_package("UPW") is not None:
return self.get_package("UPW").laytyp.array
return None
@property
def hdry(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").hdry
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").hdry
if self.get_package("UPW") is not None:
return self.get_package("UPW").hdry
return None
@property
def hnoflo(self):
try:
bas6 = self.get_package("BAS6")
return bas6.hnoflo
except AttributeError:
return None
@property
def laycbd(self):
try:
dis = self.get_package("DIS")
return dis.laycbd.array
except AttributeError:
return None
# we don't need these - no need for controlled access to array_free_format
# def set_free_format(self, value=True):
# """
# Set the free format flag for the model instance
#
# Parameters
# ----------
# value : bool
# Boolean value to set free format flag for model. (default is True)
#
# Returns
# -------
#
# """
# if not isinstance(value, bool):
# print('Error: set_free_format passed value must be a boolean')
# return False
# self.array_free_format = value
#
# def get_free_format(self):
# """
# Return the free format flag for the model
#
# Returns
# -------
# out : bool
# Free format flag for the model
#
# """
# return self.array_free_format
def next_unit(self, i=None):
if i is not None:
self.__onunit__ = i - 1
else:
self.__onunit__ += 1
return self.__onunit__
def next_ext_unit(self):
"""
        Return the next external unit number and increment the counter.
"""
next_unit = self._next_ext_unit + 1
self._next_ext_unit += 1
return next_unit
def export(self, f, **kwargs):
"""
Method to export a model to netcdf or shapefile based on the
extension of the file name (.shp for shapefile, .nc for netcdf)
Parameters
----------
f : str
filename
kwargs : keyword arguments
modelgrid : flopy.discretization.Grid instance
user supplied modelgrid which can be used for exporting
in lieu of the modelgrid associated with the model object
Returns
-------
None or Netcdf object
"""
from .export import utils
return utils.model_export(f, self, **kwargs)
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
"""
for idx, u in enumerate(p.unit_number):
if u != 0:
if u in self.package_units or u in self.external_units:
                    try:
                        pn = p.name[idx]
                    except Exception:
                        pn = p.name
if self.verbose:
print(
f"\nWARNING:\n unit {u} of package {pn} already in use."
)
self.package_units.append(u)
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif isinstance(p, type(pp)):
if self.verbose:
print(
"\nWARNING:\n Two packages of the same type, "
f"Replacing existing '{p.name[0]}' package."
)
self.packagelist[i] = p
return
if self.verbose:
print("adding Package: ", p.name[0])
self.packagelist.append(p)
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
"""
for i, pp in enumerate(self.packagelist):
if pname.upper() in pp.name:
if self.verbose:
print("removing Package: ", pp.name)
# Remove the package object from the model's packagelist
p = self.packagelist.pop(i)
# Remove the package unit number from the list of package
# units stored with the model
for iu in p.unit_number:
if iu in self.package_units:
self.package_units.remove(iu)
return
raise StopIteration(
"Package name " + pname + " not found in Package list"
)
def __getattr__(self, item):
"""
__getattr__ - syntactic sugar
Parameters
----------
item : str
            3 character package name (case insensitive), or one of "tr",
            "nper", or "start_datetime" to access the corresponding
            attribute of the ModflowDis object
        Returns
        -------
        pp : Package object
            Package object of type :class:`flopy.pakbase.Package`, or the
            requested ModflowDis attribute
        Note
        ----
        If self.dis is None, "tr" and "start_datetime" resolve to None
        and "nper" resolves to 0.
"""
if item == "output_packages" or not hasattr(self, "output_packages"):
raise AttributeError(item)
if item == "tr":
if self.dis is not None:
return self.dis.tr
else:
return None
if item == "nper":
if self.dis is not None:
return self.dis.nper
else:
return 0
if item == "start_datetime":
if self.dis is not None:
return self.dis.start_datetime
else:
return None
# return self.get_package(item)
# to avoid infinite recursion
if item == "_packagelist" or item == "packagelist":
raise AttributeError(item)
pckg = self.get_package(item)
if pckg is not None or item in self.mfnam_packages:
return pckg
if item == "modelgrid":
return
raise AttributeError(item)
def get_ext_dict_attr(
self, ext_unit_dict=None, unit=None, filetype=None, pop_key=True
):
iu = None
fname = None
if ext_unit_dict is not None:
for key, value in ext_unit_dict.items():
if key == unit:
iu = key
fname = os.path.basename(value.filename)
break
elif value.filetype == filetype:
iu = key
fname = os.path.basename(value.filename)
if pop_key:
self.add_pop_key_list(iu)
break
return iu, fname
def _output_msg(self, i, add=True):
if add:
txt1 = "Adding"
txt2 = "to"
else:
txt1 = "Removing"
txt2 = "from"
print(
f"{txt1} {self.output_fnames[i]} (unit={self.output_units[i]}) "
f"{txt2} the output list."
)
def add_output_file(
self, unit, fname=None, extension="cbc", binflag=True, package=None
):
"""
Add an ascii or binary output file for a package
Parameters
----------
unit : int
unit number of external array
fname : str
filename of external array. (default is None)
extension : str
extension to use for the cell-by-cell file. Only used if fname
is None. (default is cbc)
binflag : bool
boolean flag indicating if the output file is a binary file.
Default is True
package : str
string that defines the package the output file is attached to.
Default is None
"""
add_cbc = False
if unit > 0:
add_cbc = True
# determine if the file is in external_units
if abs(unit) in self.external_units:
idx = self.external_units.index(abs(unit))
if fname is None:
fname = os.path.basename(self.external_fnames[idx])
binflag = self.external_binflag[idx]
self.remove_external(unit=abs(unit))
# determine if the unit exists in the output data
if abs(unit) in self.output_units:
add_cbc = False
idx = self.output_units.index(abs(unit))
# determine if binflag has changed
if binflag is not self.output_binflag[idx]:
add_cbc = True
if add_cbc:
self.remove_output(unit=abs(unit))
else:
if package is not None:
self.output_packages[idx].append(package)
if add_cbc:
if fname is None:
fname = f"{self.name}.{extension}"
# check if this file name exists for a different unit number
if fname in self.output_fnames:
idx = self.output_fnames.index(fname)
iut = self.output_units[idx]
if iut != unit:
# include unit number in fname if package has
# not been passed
if package is None:
fname = f"{self.name}.{unit}.{extension}"
# include package name in fname
else:
fname = f"{self.name}.{package}.{extension}"
else:
fname = os.path.basename(fname)
self.add_output(fname, unit, binflag=binflag, package=package)
return
def add_output(self, fname, unit, binflag=False, package=None):
"""
        Add an output file so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.output_fnames:
if self.verbose:
print(
"BaseModel.add_output() warning: "
f"replacing existing filename {fname}"
)
idx = self.output_fnames.index(fname)
if self.verbose:
self._output_msg(idx, add=False)
self.output_fnames.pop(idx)
self.output_units.pop(idx)
self.output_binflag.pop(idx)
self.output_packages.pop(idx)
self.output_fnames.append(fname)
self.output_units.append(unit)
self.output_binflag.append(binflag)
if package is not None:
self.output_packages.append([package])
else:
self.output_packages.append([])
if self.verbose:
self._output_msg(-1, add=True)
return
def remove_output(self, fname=None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
else:
msg = " either fname or unit must be passed to remove_output()"
raise Exception(msg)
return
def get_output(self, fname=None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
return self.output_units[i]
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
return self.output_fnames[i]
return None
else:
msg = " either fname or unit must be passed to get_output()"
raise Exception(msg)
return
def set_output_attribute(self, fname=None, unit=None, attr=None):
"""
Set a variable in an output file from the model by specifying either
the file name or the unit number and a dictionary with attributes
to change.
Parameters
----------
        fname : str
            filename of output array
        unit : int
            unit number of output array
        attr : dict
            dictionary of attributes to change, keyed by "binflag",
            "fname", or "unit"
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
msg = (
" either fname or unit must be passed "
"to set_output_attribute()"
)
raise Exception(msg)
if attr is not None:
if idx is not None:
                for key, value in attr.items():
if key == "binflag":
self.output_binflag[idx] = value
elif key == "fname":
self.output_fnames[idx] = value
elif key == "unit":
self.output_units[idx] = value
return
def get_output_attribute(self, fname=None, unit=None, attr=None):
"""
        Get an attribute for an output file from the model by specifying either
the file name or the unit number.
Parameters
----------
        fname : str
            filename of output array
        unit : int
            unit number of output array
        attr : str
            name of the attribute to return: "binflag", "fname", or "unit"
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
" either fname or unit must be passed "
"to set_output_attribute()"
)
v = None
if attr is not None:
if idx is not None:
if attr == "binflag":
v = self.output_binflag[idx]
elif attr == "fname":
v = self.output_fnames[idx]
elif attr == "unit":
v = self.output_units[idx]
return v
def add_external(self, fname, unit, binflag=False, output=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.external_fnames:
if self.verbose:
print(
"BaseModel.add_external() warning: "
f"replacing existing filename {fname}"
)
idx = self.external_fnames.index(fname)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
if unit in self.external_units:
if self.verbose:
msg = f"BaseModel.add_external() warning: replacing existing unit {unit}"
print(msg)
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
self.external_output.append(output)
return
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
"""
plist = []
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
plist.append(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
plist.append(i)
else:
msg = " either fname or unit must be passed to remove_external()"
raise Exception(msg)
# remove external file
j = 0
for i in plist:
ipos = i - j
self.external_fnames.pop(ipos)
self.external_units.pop(ipos)
self.external_binflag.pop(ipos)
self.external_output.pop(ipos)
j += 1
return
def add_existing_package(
self, filename, ptype=None, copy_to_model_ws=True
):
"""
Add an existing package to a model instance.
Parameters
----------
filename : str
the name of the file to add as a package
ptype : optional
the model package type (e.g. "lpf", "wel", etc). If None,
then the file extension of the filename arg is used
copy_to_model_ws : bool
flag to copy the package file into the model_ws directory.
Returns
-------
None
"""
if ptype is None:
ptype = filename.split(".")[-1]
ptype = str(ptype).upper()
# for pak in self.packagelist:
# if ptype in pak.name:
# print("BaseModel.add_existing_package() warning: " +\
# "replacing existing package {0}".format(ptype))
class Obj:
pass
fake_package = Obj()
fake_package.write_file = lambda: None
fake_package.name = [ptype]
fake_package.extension = [filename.split(".")[-1]]
fake_package.unit_number = [self.next_ext_unit()]
if copy_to_model_ws:
base_filename = os.path.split(filename)[-1]
fake_package.file_name = [base_filename]
shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
else:
fake_package.file_name = [filename]
fake_package.allowDuplicates = True
self.add_package(fake_package)
def get_name_file_entries(self):
"""
Get a string representation of the name file.
        Returns
        -------
        str
            String representation of the name file.
"""
lines = []
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s = f"{p.name[i]:14s} {p.unit_number[i]:5d} {p.file_name[i]}"
lines.append(s)
return "\n".join(lines) + "\n"
def has_package(self, name):
"""
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
True if package name exists, otherwise False if not found.
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for p in self.packagelist:
for pn in p.name:
if pn.upper() == name:
return True
return False
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for pp in self.packagelist:
if pp.name[0].upper() == name:
return pp
return None
def set_version(self, version):
self.version = version.lower()
# check that this is a valid model version
if self.version not in list(self.version_types.keys()):
err = (
f"Error: Unsupported model version ({self.version}). "
"Valid model versions are:"
)
for v in list(self.version_types.keys()):
err += f" {v}"
raise Exception(err)
# set namefile heading
self.heading = (
f"# Name file for {self.version_types[self.version]}, "
f"generated by Flopy version {__version__}."
)
# set heading for each package
for p in self.get_package_list():
pak = self.get_package(p)
if hasattr(pak, "heading"):
pak._generate_heading()
return None
def change_model_ws(self, new_pth=None, reset_external=False):
"""
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
the present working directory).
Returns
-------
        None
"""
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
print(f"\ncreating model workspace...\n {new_pth}")
os.makedirs(new_pth)
            except OSError:
                raise OSError(f"{new_pth} is not a valid workspace folder")
# line = '\n{} not valid, workspace-folder '.format(new_pth) + \
# 'was changed to {}\n'.format(os.getcwd())
# print(line)
# new_pth = os.getcwd()
# --reset the model workspace
old_pth = self._model_ws
self._model_ws = new_pth
if self.verbose:
print(f"\nchanging model workspace...\n {new_pth}")
# reset the paths for each package
for pp in self.packagelist:
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
# create the external path (if needed)
if (
hasattr(self, "external_path")
and self.external_path is not None
and not os.path.exists(
os.path.join(self._model_ws, self.external_path)
)
):
pth = os.path.join(self._model_ws, self.external_path)
os.makedirs(pth)
if reset_external:
self._reset_external(pth, old_pth)
elif reset_external:
self._reset_external(self._model_ws, old_pth)
return None
def _reset_external(self, pth, old_pth):
new_ext_fnames = []
for ext_file, output in zip(
self.external_fnames, self.external_output
):
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
# this is a wicked mess
if output:
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
new_ext_file = ext_file
else:
# fpth = os.path.abspath(os.path.join(old_pth, ext_file))
# new_ext_file = os.path.relpath(fpth, os.path.abspath(pth))
fdir = os.path.dirname(ext_file)
if fdir == "":
fpth = os.path.abspath(os.path.join(old_pth, ext_file))
else:
fpth = ext_file
ao = os.path.abspath(os.path.dirname(fpth))
ep = os.path.abspath(pth)
relp = os.path.relpath(ao, ep)
new_ext_file = os.path.join(relp, os.path.basename(ext_file))
new_ext_fnames.append(new_ext_file)
self.external_fnames = new_ext_fnames
@property
def model_ws(self):
return copy.deepcopy(self._model_ws)
def _set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = str(value)
self.namefile = self.__name + "." + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + "." + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
def __setattr__(self, key, value):
if key == "free_format_input":
# if self.bas6 is not None:
# self.bas6.ifrefm = value
super().__setattr__(key, value)
elif key == "name":
self._set_name(value)
elif key == "model_ws":
self.change_model_ws(value)
elif key == "sr" and value.__class__.__name__ == "SpatialReference":
warnings.warn(
"SpatialReference has been deprecated.",
category=DeprecationWarning,
)
if self.dis is not None:
self.dis.sr = value
else:
raise Exception(
"cannot set SpatialReference - ModflowDis not found"
)
elif key == "tr":
assert isinstance(
value, discretization.reference.TemporalReference
)
if self.dis is not None:
self.dis.tr = value
else:
raise Exception(
"cannot set TemporalReference - ModflowDis not found"
)
elif key == "start_datetime":
if self.dis is not None:
self.dis.start_datetime = value
self.tr.start_datetime = value
else:
raise Exception(
"cannot set start_datetime - ModflowDis not found"
)
else:
super().__setattr__(key, value)
def run_model(
self,
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
):
"""
This method will run the model using subprocess.Popen.
Parameters
----------
        silent : boolean
            Suppress writing of run information to the screen
            (default is False).
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
            by the method. (default is False).
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
return run_model(
self.exe_name,
self.namefile,
model_ws=self.model_ws,
silent=silent,
pause=pause,
report=report,
normal_msg=normal_msg,
)
def load_results(self):
print("load_results not implemented")
return None
def write_input(self, SelPackList=False, check=False):
"""
Write the input.
Parameters
----------
        SelPackList : False or list of package names
            If False (default), all packages are written. Otherwise only
            the packages whose names appear in the list are written.
"""
if check:
# run check prior to writing input
self.check(f=f"{self.name}.chk", verbose=self.verbose, level=1)
# reset the model to free_format if parameter substitution was
# performed on a model load
if self.parameter_load and not self.free_format_input:
if self.verbose:
print(
"\nResetting free_format_input to True to "
"preserve the precision of the parameter data."
)
self.free_format_input = True
if self.verbose:
print("\nWriting packages:")
if SelPackList == False:
for p in self.packagelist:
if self.verbose:
print(" Package: ", p.name[0])
# prevent individual package checks from running after
# model-level package check above
# otherwise checks are run twice
# or the model level check procedure would have to be split up
# or each package would need a check argument,
# or default for package level check would have to be False
try:
p.write_file(check=False)
except TypeError:
p.write_file()
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(" Package: ", p.name[0])
try:
p.write_file(check=False)
except TypeError:
p.write_file()
break
if self.verbose:
print(" ")
# write name file
self.write_name_file()
# os.chdir(org_dir)
return
def write_name_file(self):
"""
        Every model needs its own write_name_file method
"""
raise Exception(
"IMPLEMENTATION ERROR: writenamefile must be overloaded"
)
def set_model_units(self):
"""
Every model needs its own set_model_units method
"""
raise Exception(
"IMPLEMENTATION ERROR: set_model_units must be overloaded"
)
@property
def name(self):
"""
Get model name
Returns
-------
name : str
name of model
"""
return copy.deepcopy(self.__name)
def add_pop_key_list(self, key):
"""
        Add an external file unit number to a list that will be used to remove
model output (typically binary) files from ext_unit_dict.
Parameters
----------
key : int
file unit number
Returns
-------
Examples
--------
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
chk = utils.check(self, f=f, verbose=verbose, level=level)
# check for unit number conflicts
package_units = {}
duplicate_units = {}
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] != 0:
if p.unit_number[i] in package_units.values():
duplicate_units[p.name[i]] = p.unit_number[i]
otherpackage = [
k
for k, v in package_units.items()
if v == p.unit_number[i]
][0]
duplicate_units[otherpackage] = p.unit_number[i]
if len(duplicate_units) > 0:
for k, v in duplicate_units.items():
chk._add_to_summary(
"Error", package=k, value=v, desc="unit number conflict"
)
else:
chk.passed.append("Unit number conflicts")
return self._check(chk, level)
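    # A minimal illustration (hypothetical values, not FloPy output) of the
    # unit-number scan above: if DIS already owns unit 11 and LPF owns unit
    # 15, a WEL package that also claims unit 15 lands in duplicate_units as
    # {'WEL': 15, 'LPF': 15} -- both the new claimant and the original owner
    # are flagged in the check summary.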
def plot(self, SelPackList=None, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
model input data
Parameters
----------
SelPackList : bool or list
            List of packages to plot. If SelPackList=None all packages
are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
                files if filename_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
                MODFLOW zero-based layer number to return. If None, then
                all layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return.
(default is zero)
key : str
MfList dictionary key. (default is None)
Returns
        -------
axes : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis are returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_model_helper(
self, SelPackList=SelPackList, **kwargs
)
return axes
def to_shapefile(self, filename, package_names=None, **kwargs):
"""
Wrapper function for writing a shapefile for the model grid. If
package_names is not None, then search through the requested packages
looking for arrays that can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> m.to_shapefile('model.shp', SelPackList)
"""
warnings.warn("to_shapefile() is deprecated. use .export()")
self.export(filename, package_names=package_names)
return
def run_model(
exe_name,
namefile,
model_ws="./",
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
use_async=False,
cargs=None,
):
"""
This function will run the model using subprocess.Popen. It
communicates with the model's stdout asynchronously and reports
progress to the screen with timestamps
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
Namefile of model to run. The namefile must be the
filename of the namefile without the path. Namefile can be None
to allow programs that do not require a control file (name file)
to be passed as a command line argument.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
silent : boolean
        If True, suppress echo of run information to the screen. (default is False).
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
        by the method. (default is False).
normal_msg : str or list
Normal termination message used to determine if the
run terminated normally. More than one message can be provided using
a list. (Default is 'normal termination')
use_async : boolean
        Asynchronously read model stdout and report with timestamps. Good
        for models that take a long time to run, but not for models that
        run very quickly. (default is False)
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
success = False
buff = []
# convert normal_msg to a list of lower case str for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg]
for idx, s in enumerate(normal_msg):
normal_msg[idx] = s.lower()
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
import platform
        if platform.system() == "Windows":
if not exe_name.lower().endswith(".exe"):
exe = which(exe_name + ".exe")
elif exe_name.lower().endswith(".exe"):
exe = which(exe_name[:-4])
if exe is None:
raise Exception(
f"The program {exe_name} does not exist or is not executable."
)
else:
if not silent:
print(
f"FloPy is using the following executable to run the model: {exe}"
)
if namefile is not None:
if not os.path.isfile(os.path.join(model_ws, namefile)):
raise Exception(
f"The namefile for this model does not exists: {namefile}"
)
# simple little function for the thread to target
def q_output(output, q):
for line in iter(output.readline, b""):
q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe_name]
if namefile is not None:
argv.append(namefile)
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
if not use_async:
while True:
line = proc.stdout.readline().decode("utf-8")
if line == "" and proc.poll() is not None:
break
if line:
for msg in normal_msg:
if msg in line.lower():
success = True
break
line = line.rstrip("\r\n")
if not silent:
print(line)
if report:
buff.append(line)
else:
break
return success, buff
# some tricks for the async stdout reading
q = Queue.Queue()
thread = threading.Thread(target=q_output, args=(proc.stdout, q))
thread.daemon = True
thread.start()
failed_words = ["fail", "error"]
last = datetime.now()
lastsec = 0.0
while True:
try:
line = q.get_nowait()
except Queue.Empty:
pass
else:
if line == "":
break
line = line.decode().lower().strip()
if line != "":
now = datetime.now()
dt = now - last
tsecs = dt.total_seconds() - lastsec
line = f"(elapsed:{tsecs})-->{line}"
lastsec = tsecs + lastsec
buff.append(line)
if not silent:
print(line)
for fword in failed_words:
if fword in line:
success = False
break
if proc.poll() is not None:
break
proc.wait()
thread.join(timeout=1)
    # decode the remaining raw stdout bytes before the string search below
    buff.extend(line.decode() for line in proc.stdout.readlines())
proc.stdout.close()
for line in buff:
for msg in normal_msg:
if msg in line.lower():
print("success")
success = True
break
if pause:
input("Press Enter to continue...")
return success, buff
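# --- Usage sketch (illustrative only, not part of FloPy): assumes an
# executable named "mf2005" is on the PATH and "model.nam" exists in the
# working directory. The _demo_* helper is hypothetical. ---
def _demo_run_model():
    success, buff = run_model(
        "mf2005",
        "model.nam",
        model_ws=".",
        report=True,
    )
    if not success:
        # with report=True, buff holds every stdout line for post-mortem
        print("\n".join(buff[-20:]))
    return success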
|
dedent_docstr
|
Dedent all lines except first n lines
Args:
s (type): some text to dedent
n (int): number of lines to skip, (n == 0 is a normal dedent,
n == 1 is useful for whole docstrings)
|
# -*- coding: utf-8 -*-
"""Docstring Parsers/Formatters"""
# TODO: break this module up into smaller pieces
import sys
import re
from textwrap import dedent
from collections import OrderedDict
from itertools import islice
from .autodocstring_logging import logger
PY3k = sys.version_info[0] == 3
if PY3k:
string_types = str,
else:
string_types = basestring, # pylint: disable=undefined-variable
def make_docstring_obj(docstr, default="google", template_order=False):
"""Detect docstring style and create a Docstring object
Parameters:
docstr (str): source docstring
default (str, class): 'google', 'numpy' or subclass
of Docstring
template_order (bool, optional): iff True, reorder the
sections to match the order they appear in the template
Returns:
subclass of Docstring
"""
typ = detect_style(docstr)
logger.info("[make_docstring_obj] from {} to {}"
"".format(typ.__name__ if typ is not None else None, default.__name__))
if typ is None:
if issubclass(default, Docstring):
typ = default
else:
typ = STYLE_LOOKUP[default.lower()]
return typ(docstr, template_order=template_order)
def detect_style(docstr):
"""Detect docstr style from existing docstring
Parameters:
docstr (str): docstring whose style we want to know
Returns:
class: one of [GoogleDocstring, NumpyDocstring, None]; None
means no match
"""
docstr = dedent_docstr(docstr)
for c in STYLE_LOOKUP.values():
if c.detect_style(docstr):
return c
return None
# MASKED: dedent_docstr function (lines 63-77)
def dedent_verbose(s, n=1):
    """Like dedent_docstr, but also return the indentation removed
    Args:
        s (str): text to dedent
        n (int): number of leading lines to skip
    Returns:
        tuple: (str, str) the common indentation that was removed, and
            the dedented text
    """
new = dedent_docstr(s, n=n)
s_split = s.splitlines(keepends=True)
new_split = new.splitlines(keepends=True)
i, ind = 0, -1
for i in range(n, len(s_split)):
if s_split[i].strip():
ind = s_split[i].find(new_split[i])
break
if ind >= 0:
indent = s_split[i][:ind]
else:
indent = ""
return indent, new
def indent_docstr(s, indent, n=1, trim=True):
"""Add common indentation to all lines except first
Args:
s (str): docstring starting at indentation level 0
indent (str): text used for indentation, in practice
this will be the level of the declaration + 1
n (int): don't indent first n lines
trim (bool): trim whitespace (' \t') out of blank lines
Returns:
s with common indentation applied
"""
lines = s.splitlines(keepends=True)
for i in range(n, len(lines)):
if lines[i].strip() or not trim:
lines[i] = "{0}{1}".format(indent, lines[i])
else:
lines[i] = lines[i].strip(' \t')
return "".join(lines)
def count_leading_newlines(s):
"""count number of leading newlines
this includes newlines that are separated by other whitespace
"""
return s[:-len(s.lstrip())].count('\n')
def count_trailing_newlines(s):
"""count number of trailing newlines
this includes newlines that are separated by other whitespace
"""
return s[len(s.rstrip()):].count('\n')
def with_bounding_newlines(s, nleading=0, ntrailing=0, nl='\n'):
"""return s with at least # leading and # trailing newlines
this includes newlines that are separated by other whitespace
"""
return "{0}{1}{2}".format(nl * (nleading - count_leading_newlines(s)),
s,
nl * (ntrailing - count_trailing_newlines(s)))
def strip_newlines(s, nleading=0, ntrailing=0):
"""strip at most nleading and ntrailing newlines from s"""
    for _ in range(nleading):
        # check the two-character windows line ending first; a single
        # index can never equal '\r\n' and raises IndexError on ''
        if s.lstrip(' \t')[:2] == '\r\n':
            s = s.lstrip(' \t')[2:]
        elif s.lstrip(' \t')[:1] == '\n':
            s = s.lstrip(' \t')[1:]
for _ in range(ntrailing):
if s.rstrip(' \t')[-2:] == '\r\n':
s = s.rstrip(' \t')[:-2]
elif s.rstrip(' \t')[-1:] == '\n':
s = s.rstrip(' \t')[:-1]
return s
class Parameter(object):
""""""
names = None
types = None
description = None
tag = None
descr_only = None
meta = None
def __init__(self, names, types, description, tag=None, descr_only=False,
annotated=False, **kwargs):
"""
Args:
names (list): list of names
types (str): string describing data types
description (str): description text
tag (int): some meaningful index? not fleshed out yet
descr_only (bool): only description is useful
**kwargs: Description
"""
assert names is not None
if description is None:
description = ""
self.names = names
self.types = types
self.description = description
self.tag = tag
self.descr_only = descr_only
self.annotated = annotated
self.meta = kwargs
class Section(object):
""""""
ALIASES = {}
PARSERS = {}
is_formatted = None
args = None
args_parser = None
args_formatter = None
heading = None
alias = None
_text = None
section_indent = ""
indent = " "
meta = None
formatter_override = None
def __init__(self, heading, text="", indent=None, **kwargs):
"""
Args:
heading (str): heading of the section (should be title case)
text (str, optional): section text
indent (str, optional): used by some formatters
"""
self.heading = heading
self.alias = self.resolve_alias(heading)
if self.alias in self.PARSERS:
parser, formatter = self.PARSERS[self.alias]
self.args_parser = parser
self.args_formatter = formatter
self.is_formatted = True
else:
self.is_formatted = False
if indent is not None:
self.indent = indent
self.text = text
self.meta = kwargs
logger.debug("create section '{}' ({}) with args : '{}'".format(self.heading,
self.alias,
self.args))
@classmethod
def from_section(cls, sec):
new_sec = cls(sec.alias)
new_sec._text = sec._text # pylint: disable=protected-access
# when changing styles, the indentation should change to better fit
# the new style
# new_sec.section_indent = sec.section_indent
# new_sec.indent = sec.indent
if hasattr(sec, "args"):
new_sec.args = sec.args
return new_sec
@classmethod
def resolve_alias(cls, heading):
""""""
titled_heading = heading.title()
try:
return cls.ALIASES[titled_heading]
except KeyError:
return heading
@property
def text(self):
""""""
if self.formatter_override is not None:
s = self.formatter_override(self) # pylint: disable=not-callable
elif self.args_formatter is not None:
s = self.args_formatter(self)
else:
s = self._text
return s
@text.setter
def text(self, val):
""""""
val = strip_newlines(val, ntrailing=1)
if self.args_parser is not None:
self.args = self.args_parser(self, val)
else:
section_indent, self._text = dedent_verbose(val, n=0)
# don't overwrite section indent if val isn't indented
if section_indent:
self.section_indent = section_indent
class NapoleonSection(Section):
""""""
ALIASES = {"Args": "Parameters",
"Arguments": "Parameters",
"Deleted Args": "Deleted Parameters",
"Deleted Arguments": "Deleted Parameters",
"Other Args": "Other Parameters",
"Other Arguments": "Other Parameters",
"Keyword Args": "Keyword Arguments",
"Return": "Returns",
"Yield": "Yields",
"No Longer Returns": "No Longer Returned",
"No Longer Yields": "No Longer Yielded",
"Warnings": "Warning"
}
def is_return_section(self):
return self.heading and self.heading.lower() in ('return', 'returns',
'yield', 'yields')
def param_parser_common(self, text):
# NOTE: there will be some tricky business if there is a
# section break done by "resuming unindented text"
param_list = []
param_dict = OrderedDict()
text = dedent_docstr(text, 0)
_r = r"^\S[^\r\n]*(?:\n[^\S\n]+\S[^\r\n]*|\n)*"
param_blocks = re.findall(_r, text, re.MULTILINE)
for i, block in enumerate(param_blocks):
param = self.finalize_param(block, len(param_list))
param_list.append(param)
if self.is_return_section():
param.names = [", ".join(param.names)]
param_dict[i] = param
else:
for name in param.names:
param_dict[name] = param
return param_dict
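# --- Illustrative sketch (not part of the original module): how the block
# regex in param_parser_common splits a dedented section body into one
# chunk per parameter, keeping indented continuation lines with their
# parameter. The _demo_* helper below is hypothetical. ---
def _demo_param_blocks():
    text = ("x (int): first value\n"
            "    spanning two lines\n"
            "y (str): second value\n")
    _r = r"^\S[^\r\n]*(?:\n[^\S\n]+\S[^\r\n]*|\n)*"
    return re.findall(_r, text, re.MULTILINE)
    # -> ['x (int): first value\n    spanning two lines\n',
    #     'y (str): second value\n']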
class GoogleSection(NapoleonSection):
""""""
section_indent = " "
indent = " "
@staticmethod
def finalize_param(s, tag):
"""
Args:
s (type): Description
tag (int): index of param? not fleshed out yet
"""
meta = {}
_r = r"([^,\s]+(?:\s*,\s*[^,\s]+)*\s*)(?:\((.*)\))?\s*:\s*(.*)"
m = re.match(_r, s, re.DOTALL | re.MULTILINE)
if m:
names, typ, descr = m.groups()
names = [n.strip() for n in names.split(',')]
meta['indent'], descr = dedent_verbose(descr, n=1)
descr_only = False
else:
names = ["{0}".format(tag)]
typ = ""
descr = s
descr_only = True
return Parameter(names, typ, descr, tag=tag, descr_only=descr_only, **meta)
def param_parser(self, text):
logger.info("[GoogleSection] section '{}' starts parsing".format(self.alias))
return self.param_parser_common(text)
def param_formatter(self):
""""""
logger.info("[GoogleSection] section '{}' starts formatting".format(self.alias))
s = ""
for param in self.args.values():
if param.descr_only:
s += with_bounding_newlines(param.description, ntrailing=1)
else:
if len(param.names) > 1:
logger.warn("section '{}' : Google docstrings don't allow > 1 "
"parameter per description".format(self.alias))
p = "{0}".format(", ".join(param.names))
if param.types:
types = param.types.strip()
if types:
p = "{0} ({1})".format(p, types)
if param.description:
desc = indent_docstr(param.description,
param.meta.get("indent", self.indent))
p = "{0}: {1}".format(p, desc)
s += with_bounding_newlines(p, ntrailing=1)
return s
PARSERS = {"Parameters": (param_parser,
param_formatter),
"Other Parameters": (param_parser,
param_formatter),
"Deleted Parameters": (param_parser,
param_formatter),
"Keyword Arguments": (param_parser,
param_formatter),
"Attributes": (param_parser,
param_formatter),
"Deleted Attributes": (param_parser,
param_formatter),
"Raises": (param_parser,
param_formatter),
"No Longer Raises": (param_parser,
param_formatter),
"Returns": (param_parser,
param_formatter),
"Yields": (param_parser,
param_formatter),
"No Longer Returned": (param_parser,
param_formatter),
"No Longer Yielded": (param_parser,
param_formatter),
}
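# --- Illustrative sketch (not part of the original module): how the
# GoogleSection.finalize_param regex carves "name (type): description"
# into groups; the comma-separated names are .strip()ed afterwards.
# The _demo_* helper is hypothetical. ---
def _demo_google_param_regex():
    _r = r"([^,\s]+(?:\s*,\s*[^,\s]+)*\s*)(?:\((.*)\))?\s*:\s*(.*)"
    m = re.match(_r, "x (int): the value", re.DOTALL | re.MULTILINE)
    return m.groups()  # -> ('x ', 'int', 'the value')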
class NumpySection(NapoleonSection):
""""""
indent = " "
@staticmethod
def finalize_param(s, i):
meta = {}
_r = r"\s*([^,\s]+(?:\s*,\s*[^,\s]+)*)\s*(?::\s*(.*?))?[^\S\n]*?\n(\s+.*)"
m = re.match(_r, s, re.DOTALL)
if m:
names, typ, desc = m.groups()
# FIXME hack, name for numpy parameters is always a list of names
# to support the multiple parameters per description option in
# numpy docstrings
names = [n.strip() for n in names.split(',')]
meta['indent'], descr = dedent_verbose(desc, 0)
descr_only = False
else:
names = ["{0}".format(i)]
typ = ""
descr = s
descr_only = True
return Parameter(names, typ, descr, tag=i, descr_only=descr_only, **meta)
def param_parser(self, text):
logger.info("[NumpySection] section '{}' starts parsing".format(self.alias))
return self.param_parser_common(text)
def param_formatter(self):
""""""
# NOTE: there will be some tricky business if there is a
# section break done by "resuming unindented text"
logger.info("[NumpySection] section '{}' starts formatting".format(self.alias))
s = ""
# already_seen = {}
for param in self.args.values():
if param.descr_only:
s += with_bounding_newlines(param.description, ntrailing=1)
else:
p = "{0}".format(", ".join(param.names))
if param.types:
types = param.types.strip()
if types:
p = "{0} : {1}".format(p, param.types.strip())
p = with_bounding_newlines(p, ntrailing=1)
if param.description:
p += indent_docstr(param.description,
param.meta.get("indent", self.indent),
n=0)
s += with_bounding_newlines(p, ntrailing=1)
return s
PARSERS = {"Parameters": (param_parser,
param_formatter),
"Other Parameters": (param_parser,
param_formatter),
"Deleted Parameters": (param_parser,
param_formatter),
"Keyword Arguments": (param_parser,
param_formatter),
"Attributes": (param_parser,
param_formatter),
"Deleted Attributes": (param_parser,
param_formatter),
"Raises": (param_parser,
param_formatter),
"No Longer Raises": (param_parser,
param_formatter),
"Returns": (param_parser,
param_formatter),
"Yields": (param_parser,
param_formatter),
"No Longer Returned": (param_parser,
param_formatter),
"No Longer Yielded": (param_parser,
param_formatter),
}
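# --- Illustrative sketch (not part of the original module): the numpy
# variant of finalize_param expects the type after " : " and the
# description on the following, indented line. The _demo_* helper is
# hypothetical. ---
def _demo_numpy_param_regex():
    _r = r"\s*([^,\s]+(?:\s*,\s*[^,\s]+)*)\s*(?::\s*(.*?))?[^\S\n]*?\n(\s+.*)"
    m = re.match(_r, "x : int\n    the value", re.DOTALL)
    return m.groups()  # -> ('x', 'int', '    the value')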
class Docstring(object):
"""Handle parsing / modifying / writing docstrings"""
STYLE_NAME = "none"
SECTION_STYLE = Section
TEMPLATE = OrderedDict([("Summary", None)])
PREFERRED_PARAMS_ALIAS = "Args"
sections = None
trailing_newlines = None
def __init__(self, docstr, template_order=False):
"""
Parameters:
docstr (Docstring or str): some existing docstring
template_order (bool, optional): iff True, reorder the
sections to match the order they appear in the template
"""
if isinstance(docstr, Docstring):
self.sections = docstr.sections
self.trailing_newlines = docstr.trailing_newlines
if not isinstance(docstr, type(self)):
# fixme, this is kinda hacky
make_new_sec = self.SECTION_STYLE.from_section
                # iterate over a copy since sections may be deleted below
                for sec_name, sec in list(docstr.sections.items()):
                    # when the section should not exist
# i.e. when a section was generated, but isn't needed anymore
# e.g. when there isn't any exception raised
if sec:
docstr.sections[sec_name] = make_new_sec(sec)
else:
# deleting section that shouldn't be here
# including those generated with template_order=True
del docstr.sections[sec_name]
# ok, this way of changing indentation is a thunder hack
if "Parameters" in docstr.sections:
self.get_section("Parameters").heading = self.PREFERRED_PARAMS_ALIAS
for arg in self.get_section("Parameters").args.values():
arg.meta['indent'] = self.get_section("Parameters").indent
if "Returns" in docstr.sections:
for arg in self.get_section("Returns").args.values():
arg.meta['indent'] = self.get_section("Returns").indent
if "Yields" in docstr.sections:
for arg in self.get_section("Yields").args.values():
arg.meta['indent'] = self.get_section("Yields").indent
elif isinstance(docstr, string_types):
if template_order:
self.sections = self.TEMPLATE.copy()
else:
self.sections = OrderedDict()
self._parse(docstr)
def _parse(self, s):
"""Parse docstring into meta data
Parameters:
s (str): docstring
"""
raise NotImplementedError("_parse is an abstract method")
def format(self, top_indent):
"""Format docstring into a string
Parameters:
top_indent (str): indentation added to all but the first
lines
Returns:
str: properly formatted
"""
raise NotImplementedError("format is an abstract method")
def update_parameters(self, params):
""""""
raise NotImplementedError("update_parameters is an abstract method")
def update_return_type(self, ret_name, ret_type,
default_description="Description",
keyword="return"):
""""""
raise NotImplementedError("update_return_type is an abstract method")
def update_attributes(self, attribs, alpha_order=True):
"""
Args:
            attribs (OrderedDict): attribute Parameter objects keyed by their names
"""
raise NotImplementedError("update_attributes is an abstract method")
def update_exceptions(self, attribs, alpha_order=True):
"""
Args:
            attribs (OrderedDict): exception Parameter objects keyed by their names
"""
raise NotImplementedError("update_exceptions is an abstract method")
def add_dummy_returns(self, name, typ, description):
raise NotImplementedError("add_dummy_returns is an abstract method")
def finalize_section(self, heading, text):
"""
Args:
heading (type): Description
text (type): Description
"""
section = self.SECTION_STYLE(heading, text)
self.sections[section.alias] = section
def get_section(self, section_name):
if section_name in self.sections:
return self.sections[section_name]
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
return self.sections[alias]
raise KeyError("Section '{0}' not found".format(section_name))
def pop_section(self, section_name):
if section_name in self.sections:
return self.sections.pop(section_name)
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
return self.sections.pop(alias)
raise KeyError("Section '{0}' not found".format(section_name))
def insert_section(self, section_name, section):
if section.heading != section_name:
section.heading = section_name
self.sections[section_name] = section
def section_exists(self, section_name):
"""returns True iff section exists, and was finalized"""
sec = None
if section_name in self.sections:
sec = self.sections[section_name]
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
sec = self.sections[alias]
if sec is not None:
return True
return False
class NapoleonDocstring(Docstring): # pylint: disable=abstract-method
"""Styles understood by napoleon, aka. Google/Numpy"""
STYLE_NAME = "napoleon"
TEMPLATE = OrderedDict([("Summary", None),
("Parameters", None),
("Keyword Arguments", None),
("Returns", None),
("Yields", None),
("No Longer Returned", None),
("No Longer Yielded", None),
("Other Parameters", None),
("Deleted Parameters", None),
("Attributes", None),
("Deleted Attributes", None),
("Methods", None),
("Raises", None),
("No Longer Raises", None),
("Warns", None),
("See Also", None),
("Warning", None),
("Note", None),
("Notes", None),
("References", None),
("Example", None),
("Examples", None),
])
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip()
def _parse(self, s):
"""
Args:
s (type): Description
"""
logger.info("[NapoleonDocstring] starts parsing text")
self.trailing_newlines = count_trailing_newlines(s)
s = dedent_docstr(s)
sec_starts = [(m.start(), m.end(), m.string[m.start():m.end()])
for m in re.finditer(self.SECTION_RE, s, re.MULTILINE)]
sec_starts.insert(0, (0, 0, "Summary"))
sec_starts.append((len(s), len(s), ""))
for current_sec, next_sec in zip(sec_starts[:-1], sec_starts[1:]):
sec_name = self._extract_section_name(current_sec[2])
sec_body = s[current_sec[1]:next_sec[0]]
self.finalize_section(sec_name, sec_body)
@staticmethod
def _format_section_text(heading, body):
raise NotImplementedError("This is an abstract method")
def format(self, top_indent):
"""
Args:
top_indent (type): Description
"""
logger.info("[NapoleonDocstring] starts formatting")
s = ""
if self.section_exists("Summary"):
sec_text = self.get_section("Summary").text
if sec_text.strip():
s += with_bounding_newlines(sec_text, nleading=0, ntrailing=1)
for _, section in islice(self.sections.items(), 1, None):
if section is None:
continue
sec_body = indent_docstr(section.text, section.section_indent, n=0)
sec_text = self._format_section_text(section.heading, sec_body)
s += with_bounding_newlines(sec_text, nleading=1, ntrailing=1)
if self.trailing_newlines:
s = with_bounding_newlines(s, ntrailing=self.trailing_newlines)
s = indent_docstr(s, top_indent)
return s
def _update_section(self, params, sec_name, sec_alias=None,
del_prefix="Deleted ", alpha_order=False,
other_sections=()):
"""Update section to add / remove params
As a failsafe, params that are removed are placed in a
"Deleted ..." section
Args:
params (OrderedDict): dict of Parameter objects
sec_name (str): generic section name
            sec_alias (str): section name that appears in the docstring
            del_prefix (str): prefix for section that holds params that
                no longer exist.
            alpha_order (bool): whether or not to alphabetically sort
                the params
            other_sections (tuple): names of other sections whose params
                should be left alone
        """
if not sec_alias:
sec_alias = sec_name
if not self.section_exists(sec_name) and len(params) == 0:
return None
elif not self.section_exists(sec_name):
self.finalize_section(sec_alias, "")
# put together which other sections exist so we can use them to
# exclude params that exist in them
_other = []
for _secname in other_sections:
if self.section_exists(_secname):
_other.append(self.get_section(_secname))
other_sections = _other
if alpha_order:
sorted_params = OrderedDict()
for k in sorted(list(params.keys()), key=str.lower):
sorted_params[k] = params[k]
params = sorted_params
current_dict = self.get_section(sec_name).args
# go through params in the order of the function declaration
# and cherry-pick from current_dict if there's already a description
# for that parameter
tags_seen = dict()
new = OrderedDict()
for name, param in params.items():
if name in current_dict:
def_param = param
param = current_dict.pop(name)
if param.tag in tags_seen:
param = None
else:
tags_seen[param.tag] = True
# update the type if annotated
if def_param.annotated:
param.types = def_param.types
else:
# if param is in one of the 'other sections', then don't
# worry about it
for sec in other_sections:
if name in sec.args:
# update the type if the annotated
if param.annotated:
sec.args[name].types = param.types
# now ignore it
param = None
if param:
new[name] = param
        # add description only parameters back in; iterate over a copy
        # since entries are popped from current_dict inside the loop
        for key, param in list(current_dict.items()):
if param.descr_only:
# param.description = '\n' + param.description
new[key] = current_dict.pop(key)
# not sure when this guy gets created
if '' in current_dict:
del current_dict['']
        # go through params that are no longer in the arguments list and
# move them from the Parameters section of the docstring to the
# deleted parameters section
if len(current_dict):
del_sec_name = del_prefix + sec_name
del_sec_alias = del_prefix + sec_alias
logger.warn("killing parameters named: {}".format(current_dict.keys()))
            # TODO: put a switch here for other behavior?
if not self.section_exists(self.SECTION_STYLE.resolve_alias(del_sec_name)):
self.finalize_section(del_sec_name, "")
deled_params = self.get_section(del_sec_name)
deleted_tags = dict()
for key, val in current_dict.items():
if key in deled_params.args:
logger.warn("Stronger Warning: Killing old deleted param: "
"'{0}'".format(key))
val.names.remove(key)
if val.tag in deleted_tags:
deleted_tags[val.tag].names.append(key)
else:
new_val = Parameter([key], val.types, val.description)
deleted_tags[val.tag] = new_val
deled_params.args[key] = new_val
if len(new) == 0:
self.sections[sec_name] = None
else:
self.sections[sec_name].args = new
def update_parameters(self, params):
"""
Args:
            params (OrderedDict): Parameter objects keyed by their names
"""
logger.info("[NapoleonDocstring] update parameters")
        other_sections = ['Other Parameters', 'Keyword Arguments']
self._update_section(params, "Parameters", self.PREFERRED_PARAMS_ALIAS,
other_sections=other_sections)
def update_return_type(self, ret_name, ret_type,
default_description="Description",
keyword="return", del_prefix="No Longer "):
""""""
logger.info("[NapoleonDocstring] update return type")
if keyword == "yield":
sec_name = "Yields"
elif keyword == "return":
sec_name = "Returns"
else:
logger.debug("Unknown return keyword: '{}'".format(keyword))
for std_ret_name in ("Yields", "Returns"):
if self.section_exists(std_ret_name):
del_sec_name = del_prefix + std_ret_name
del_sec_alias = self.SECTION_STYLE.resolve_alias(del_sec_name)
if not self.section_exists(del_sec_alias):
self.finalize_section(del_sec_alias, "")
del_sec = self.get_section(del_sec_alias)
sec = self.pop_section(std_ret_name)
del_sec.args = sec.args
return
if not self.section_exists(sec_name):
# see if a section exists from another keyword, ie, maybe
# this function used to return, but now it yields
for std_ret_name in ("Yields", "Returns"):
if self.section_exists(std_ret_name):
                    # necessary to completely recreate the section
# in order to use the right parser and formatter
logger.debug("old return section exists : '{}'".format(std_ret_name))
old_sec = self.pop_section(std_ret_name)
self.finalize_section(sec_name, "")
new_sec = self.get_section(sec_name)
new_sec.args = old_sec.args
self.insert_section(sec_name, new_sec)
break
if self.section_exists(sec_name):
sec = self.get_section(sec_name)
if sec.args and ret_type:
p0 = next(iter(sec.args.values()))
if p0.descr_only:
p0.description = ret_type
elif p0.types:
p0.types = ret_type
elif p0.names:
p0.names = [ret_type]
elif ret_name or ret_type:
description = default_description
sec.args = OrderedDict()
if ret_name:
sec.args[ret_name] = Parameter([ret_name], ret_type, description)
else:
sec.args[ret_type] = Parameter([ret_type], "", description)
else:
# and i ask myself, how did i get here?
pass
else:
self.finalize_section(sec_name, "")
sec = self.get_section(sec_name)
ret_type = ret_type if ret_type != "" else "${NUMBER:TYPE}"
sec.args = OrderedDict()
sec.args[ret_type] = Parameter([ret_type], "", default_description)
def update_attributes(self, attribs, alpha_order=True):
"""
Args:
            attribs (OrderedDict): attribute Parameter objects keyed by their names
"""
logger.info("[NapoleonDocstring] update attributes")
self._update_section(attribs, "Attributes", alpha_order=alpha_order)
def update_exceptions(self, attribs, alpha_order=True):
"""
Args:
            attribs (OrderedDict): exception Parameter objects keyed by their names
"""
logger.info("[NapoleonDocstring] update exceptions")
self._update_section(attribs, "Raises", del_prefix="No Longer ",
alpha_order=alpha_order)
def add_dummy_returns(self, name, typ, description):
# No longer used??
if not self.section_exists("Returns"):
sec = self.SECTION_STYLE("Returns")
if name:
sec.args = {name: Parameter([name], typ, description)}
else:
sec.args = {typ: Parameter([typ], "", description)}
self.sections["Returns"] = sec
class GoogleDocstring(NapoleonDocstring):
""""""
STYLE_NAME = "google"
SECTION_STYLE = GoogleSection
SECTION_RE = r"^[A-Za-z0-9][A-Za-z0-9 \t]*:\s*$\r?\n?"
PREFERRED_PARAMS_ALIAS = "Args"
@classmethod
def detect_style(cls, docstr):
""""""
m = re.search(cls.SECTION_RE, docstr, re.MULTILINE)
return m is not None
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip().rstrip(':').rstrip()
@staticmethod
def _format_section_text(heading, body):
return "{0}:\n{1}".format(heading, body)
class NumpyDocstring(NapoleonDocstring):
""""""
STYLE_NAME = "numpy"
SECTION_STYLE = NumpySection
SECTION_RE = r"^([A-Za-z0-9][A-Za-z0-9 \t]*)\s*\n-+\s*?$\r?\n?"
PREFERRED_PARAMS_ALIAS = "Parameters"
@classmethod
def detect_style(cls, docstr):
""""""
m = re.search(cls.SECTION_RE, docstr, re.MULTILINE)
return m is not None
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip().rstrip('-').rstrip()
@staticmethod
def _format_section_text(heading, body):
return "{0}\n{1}\n{2}".format(heading, "-" * len(heading), body)
STYLE_LOOKUP = OrderedDict([('numpy', NumpyDocstring),
('google', GoogleDocstring)])
##
## EOF
##
|
def dedent_docstr(s, n=1):
"""Dedent all lines except first n lines
Args:
s (type): some text to dedent
n (int): number of lines to skip, (n == 0 is a normal dedent,
n == 1 is useful for whole docstrings)
"""
lines = s.splitlines(keepends=True)
if lines:
first_n_lines = "".join([l.lstrip(' \t') for l in lines[:n]])
dedented = dedent("".join(lines[n:]))
return first_n_lines + dedented
else:
return ""
| 63
| 77
|
# -*- coding: utf-8 -*-
"""Docstring Parsers/Formatters"""
# TODO: break this module up into smaller pieces
import sys
import re
from textwrap import dedent
from collections import OrderedDict
from itertools import islice
from .autodocstring_logging import logger
PY3k = sys.version_info[0] == 3
if PY3k:
string_types = str,
else:
string_types = basestring, # pylint: disable=undefined-variable
def make_docstring_obj(docstr, default="google", template_order=False):
"""Detect docstring style and create a Docstring object
Parameters:
docstr (str): source docstring
default (str, class): 'google', 'numpy' or subclass
of Docstring
template_order (bool, optional): iff True, reorder the
sections to match the order they appear in the template
Returns:
subclass of Docstring
"""
typ = detect_style(docstr)
logger.info("[make_docstring_obj] from {} to {}"
"".format(typ.__name__ if typ is not None else None, default.__name__))
if typ is None:
if issubclass(default, Docstring):
typ = default
else:
typ = STYLE_LOOKUP[default.lower()]
return typ(docstr, template_order=template_order)
def detect_style(docstr):
"""Detect docstr style from existing docstring
Parameters:
docstr (str): docstring whose style we want to know
Returns:
class: one of [GoogleDocstring, NumpyDocstring, None]; None
means no match
"""
docstr = dedent_docstr(docstr)
for c in STYLE_LOOKUP.values():
if c.detect_style(docstr):
return c
return None
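# --- Illustrative sketch (not part of the original module): detection
# keys on section headings -- "Args:"-style lines for Google,
# dash-underlined headings for numpy. The _demo_* helper is
# hypothetical. ---
def _demo_detect_style():
    gstr = "Summary\n\nArgs:\n    x (int): value\n"
    nstr = "Summary\n\nParameters\n----------\nx : int\n    value\n"
    return detect_style(gstr), detect_style(nstr)
    # -> (GoogleDocstring, NumpyDocstring)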
def dedent_docstr(s, n=1):
"""Dedent all lines except first n lines
Args:
s (type): some text to dedent
n (int): number of lines to skip, (n == 0 is a normal dedent,
n == 1 is useful for whole docstrings)
"""
lines = s.splitlines(keepends=True)
if lines:
first_n_lines = "".join([l.lstrip(' \t') for l in lines[:n]])
dedented = dedent("".join(lines[n:]))
return first_n_lines + dedented
else:
return ""
def dedent_verbose(s, n=1):
    """Like dedent_docstr, but also return the indentation removed
    Args:
        s (str): text to dedent
        n (int): number of leading lines to skip
    Returns:
        tuple: (str, str) the common indentation that was removed, and
            the dedented text
    """
new = dedent_docstr(s, n=n)
s_split = s.splitlines(keepends=True)
new_split = new.splitlines(keepends=True)
i, ind = 0, -1
for i in range(n, len(s_split)):
if s_split[i].strip():
ind = s_split[i].find(new_split[i])
break
if ind >= 0:
indent = s_split[i][:ind]
else:
indent = ""
return indent, new
def indent_docstr(s, indent, n=1, trim=True):
"""Add common indentation to all lines except first
Args:
s (str): docstring starting at indentation level 0
indent (str): text used for indentation, in practice
this will be the level of the declaration + 1
n (int): don't indent first n lines
trim (bool): trim whitespace (' \t') out of blank lines
Returns:
s with common indentation applied
"""
lines = s.splitlines(keepends=True)
for i in range(n, len(lines)):
if lines[i].strip() or not trim:
lines[i] = "{0}{1}".format(indent, lines[i])
else:
lines[i] = lines[i].strip(' \t')
return "".join(lines)
def count_leading_newlines(s):
"""count number of leading newlines
this includes newlines that are separated by other whitespace
"""
return s[:-len(s.lstrip())].count('\n')
def count_trailing_newlines(s):
"""count number of trailing newlines
this includes newlines that are separated by other whitespace
"""
return s[len(s.rstrip()):].count('\n')
def with_bounding_newlines(s, nleading=0, ntrailing=0, nl='\n'):
"""return s with at least # leading and # trailing newlines
this includes newlines that are separated by other whitespace
"""
return "{0}{1}{2}".format(nl * (nleading - count_leading_newlines(s)),
s,
nl * (ntrailing - count_trailing_newlines(s)))
def strip_newlines(s, nleading=0, ntrailing=0):
"""strip at most nleading and ntrailing newlines from s"""
    for _ in range(nleading):
        # check the two-character windows line ending first; a single
        # index can never equal '\r\n' and raises IndexError on ''
        if s.lstrip(' \t')[:2] == '\r\n':
            s = s.lstrip(' \t')[2:]
        elif s.lstrip(' \t')[:1] == '\n':
            s = s.lstrip(' \t')[1:]
for _ in range(ntrailing):
if s.rstrip(' \t')[-2:] == '\r\n':
s = s.rstrip(' \t')[:-2]
elif s.rstrip(' \t')[-1:] == '\n':
s = s.rstrip(' \t')[:-1]
return s
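# --- Illustrative sketch (not part of the original module): the newline
# helpers count, pad, and trim bounding blank lines without touching the
# body text. The _demo_* helper is hypothetical. ---
def _demo_newline_helpers():
    s = "\n\nbody\n"
    assert count_leading_newlines(s) == 2
    assert count_trailing_newlines(s) == 1
    assert with_bounding_newlines("body", nleading=1, ntrailing=2) == "\nbody\n\n"
    assert strip_newlines(s, nleading=2, ntrailing=1) == "body"
    return s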
class Parameter(object):
""""""
names = None
types = None
description = None
tag = None
descr_only = None
meta = None
def __init__(self, names, types, description, tag=None, descr_only=False,
annotated=False, **kwargs):
"""
Args:
names (list): list of names
types (str): string describing data types
description (str): description text
tag (int): some meaningful index? not fleshed out yet
descr_only (bool): only description is useful
**kwargs: Description
"""
assert names is not None
if description is None:
description = ""
self.names = names
self.types = types
self.description = description
self.tag = tag
self.descr_only = descr_only
self.annotated = annotated
self.meta = kwargs
class Section(object):
""""""
ALIASES = {}
PARSERS = {}
is_formatted = None
args = None
args_parser = None
args_formatter = None
heading = None
alias = None
_text = None
section_indent = ""
indent = " "
meta = None
formatter_override = None
def __init__(self, heading, text="", indent=None, **kwargs):
"""
Args:
heading (str): heading of the section (should be title case)
text (str, optional): section text
indent (str, optional): used by some formatters
"""
self.heading = heading
self.alias = self.resolve_alias(heading)
if self.alias in self.PARSERS:
parser, formatter = self.PARSERS[self.alias]
self.args_parser = parser
self.args_formatter = formatter
self.is_formatted = True
else:
self.is_formatted = False
if indent is not None:
self.indent = indent
self.text = text
self.meta = kwargs
logger.debug("create section '{}' ({}) with args : '{}'".format(self.heading,
self.alias,
self.args))
@classmethod
def from_section(cls, sec):
new_sec = cls(sec.alias)
new_sec._text = sec._text # pylint: disable=protected-access
# when changing styles, the indentation should change to better fit
# the new style
# new_sec.section_indent = sec.section_indent
# new_sec.indent = sec.indent
if hasattr(sec, "args"):
new_sec.args = sec.args
return new_sec
@classmethod
def resolve_alias(cls, heading):
""""""
titled_heading = heading.title()
try:
return cls.ALIASES[titled_heading]
except KeyError:
return heading
@property
def text(self):
""""""
if self.formatter_override is not None:
s = self.formatter_override(self) # pylint: disable=not-callable
elif self.args_formatter is not None:
s = self.args_formatter(self)
else:
s = self._text
return s
@text.setter
def text(self, val):
""""""
val = strip_newlines(val, ntrailing=1)
if self.args_parser is not None:
self.args = self.args_parser(self, val)
else:
section_indent, self._text = dedent_verbose(val, n=0)
# don't overwrite section indent if val isn't indented
if section_indent:
self.section_indent = section_indent
class NapoleonSection(Section):
""""""
ALIASES = {"Args": "Parameters",
"Arguments": "Parameters",
"Deleted Args": "Deleted Parameters",
"Deleted Arguments": "Deleted Parameters",
"Other Args": "Other Parameters",
"Other Arguments": "Other Parameters",
"Keyword Args": "Keyword Arguments",
"Return": "Returns",
"Yield": "Yields",
"No Longer Returns": "No Longer Returned",
"No Longer Yields": "No Longer Yielded",
"Warnings": "Warning"
}
def is_return_section(self):
return self.heading and self.heading.lower() in ('return', 'returns',
'yield', 'yields')
def param_parser_common(self, text):
# NOTE: there will be some tricky business if there is a
# section break done by "resuming unindented text"
param_list = []
param_dict = OrderedDict()
text = dedent_docstr(text, 0)
_r = r"^\S[^\r\n]*(?:\n[^\S\n]+\S[^\r\n]*|\n)*"
param_blocks = re.findall(_r, text, re.MULTILINE)
for i, block in enumerate(param_blocks):
param = self.finalize_param(block, len(param_list))
param_list.append(param)
if self.is_return_section():
param.names = [", ".join(param.names)]
param_dict[i] = param
else:
for name in param.names:
param_dict[name] = param
return param_dict
class GoogleSection(NapoleonSection):
""""""
section_indent = " "
indent = " "
@staticmethod
def finalize_param(s, tag):
"""
Args:
s (type): Description
tag (int): index of param? not fleshed out yet
"""
meta = {}
_r = r"([^,\s]+(?:\s*,\s*[^,\s]+)*\s*)(?:\((.*)\))?\s*:\s*(.*)"
m = re.match(_r, s, re.DOTALL | re.MULTILINE)
if m:
names, typ, descr = m.groups()
names = [n.strip() for n in names.split(',')]
meta['indent'], descr = dedent_verbose(descr, n=1)
descr_only = False
else:
names = ["{0}".format(tag)]
typ = ""
descr = s
descr_only = True
return Parameter(names, typ, descr, tag=tag, descr_only=descr_only, **meta)
def param_parser(self, text):
logger.info("[GoogleSection] section '{}' starts parsing".format(self.alias))
return self.param_parser_common(text)
def param_formatter(self):
""""""
logger.info("[GoogleSection] section '{}' starts formatting".format(self.alias))
s = ""
for param in self.args.values():
if param.descr_only:
s += with_bounding_newlines(param.description, ntrailing=1)
else:
if len(param.names) > 1:
logger.warn("section '{}' : Google docstrings don't allow > 1 "
"parameter per description".format(self.alias))
p = "{0}".format(", ".join(param.names))
if param.types:
types = param.types.strip()
if types:
p = "{0} ({1})".format(p, types)
if param.description:
desc = indent_docstr(param.description,
param.meta.get("indent", self.indent))
p = "{0}: {1}".format(p, desc)
s += with_bounding_newlines(p, ntrailing=1)
return s
PARSERS = {"Parameters": (param_parser,
param_formatter),
"Other Parameters": (param_parser,
param_formatter),
"Deleted Parameters": (param_parser,
param_formatter),
"Keyword Arguments": (param_parser,
param_formatter),
"Attributes": (param_parser,
param_formatter),
"Deleted Attributes": (param_parser,
param_formatter),
"Raises": (param_parser,
param_formatter),
"No Longer Raises": (param_parser,
param_formatter),
"Returns": (param_parser,
param_formatter),
"Yields": (param_parser,
param_formatter),
"No Longer Returned": (param_parser,
param_formatter),
"No Longer Yielded": (param_parser,
param_formatter),
}
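# --- Illustrative sketch (not part of the original module): a GoogleSection
# round trip -- the "Args" text is parsed into Parameter objects on
# assignment and re-rendered by the formatter when .text is read. The
# _demo_* helper is hypothetical. ---
def _demo_google_section():
    sec = GoogleSection("Args", "x (int): first value\ny (str): second value\n")
    return sec.text
    # -> 'x (int): first value\ny (str): second value\n'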
class NumpySection(NapoleonSection):
""""""
indent = " "
@staticmethod
def finalize_param(s, i):
meta = {}
_r = r"\s*([^,\s]+(?:\s*,\s*[^,\s]+)*)\s*(?::\s*(.*?))?[^\S\n]*?\n(\s+.*)"
m = re.match(_r, s, re.DOTALL)
if m:
names, typ, desc = m.groups()
# FIXME hack, name for numpy parameters is always a list of names
# to support the multiple parameters per description option in
# numpy docstrings
names = [n.strip() for n in names.split(',')]
meta['indent'], descr = dedent_verbose(desc, 0)
descr_only = False
else:
names = ["{0}".format(i)]
typ = ""
descr = s
descr_only = True
return Parameter(names, typ, descr, tag=i, descr_only=descr_only, **meta)
def param_parser(self, text):
logger.info("[NumpySection] section '{}' starts parsing".format(self.alias))
return self.param_parser_common(text)
def param_formatter(self):
""""""
# NOTE: there will be some tricky business if there is a
# section break done by "resuming unindented text"
logger.info("[NumpySection] section '{}' starts formatting".format(self.alias))
s = ""
# already_seen = {}
for param in self.args.values():
if param.descr_only:
s += with_bounding_newlines(param.description, ntrailing=1)
else:
p = "{0}".format(", ".join(param.names))
if param.types:
types = param.types.strip()
if types:
p = "{0} : {1}".format(p, param.types.strip())
p = with_bounding_newlines(p, ntrailing=1)
if param.description:
p += indent_docstr(param.description,
param.meta.get("indent", self.indent),
n=0)
s += with_bounding_newlines(p, ntrailing=1)
return s
PARSERS = {"Parameters": (param_parser,
param_formatter),
"Other Parameters": (param_parser,
param_formatter),
"Deleted Parameters": (param_parser,
param_formatter),
"Keyword Arguments": (param_parser,
param_formatter),
"Attributes": (param_parser,
param_formatter),
"Deleted Attributes": (param_parser,
param_formatter),
"Raises": (param_parser,
param_formatter),
"No Longer Raises": (param_parser,
param_formatter),
"Returns": (param_parser,
param_formatter),
"Yields": (param_parser,
param_formatter),
"No Longer Returned": (param_parser,
param_formatter),
"No Longer Yielded": (param_parser,
param_formatter),
}
class Docstring(object):
"""Handle parsing / modifying / writing docstrings"""
STYLE_NAME = "none"
SECTION_STYLE = Section
TEMPLATE = OrderedDict([("Summary", None)])
PREFERRED_PARAMS_ALIAS = "Args"
sections = None
trailing_newlines = None
def __init__(self, docstr, template_order=False):
"""
Parameters:
docstr (Docstring or str): some existing docstring
template_order (bool, optional): iff True, reorder the
sections to match the order they appear in the template
"""
if isinstance(docstr, Docstring):
self.sections = docstr.sections
self.trailing_newlines = docstr.trailing_newlines
if not isinstance(docstr, type(self)):
# fixme, this is kinda hacky
make_new_sec = self.SECTION_STYLE.from_section
                # iterate over a copy since sections may be deleted below
                for sec_name, sec in list(docstr.sections.items()):
                    # when the section should not exist
# i.e. when a section was generated, but isn't needed anymore
# e.g. when there isn't any exception raised
if sec:
docstr.sections[sec_name] = make_new_sec(sec)
else:
# deleting section that shouldn't be here
# including those generated with template_order=True
del docstr.sections[sec_name]
# ok, this way of changing indentation is a thunder hack
if "Parameters" in docstr.sections:
self.get_section("Parameters").heading = self.PREFERRED_PARAMS_ALIAS
for arg in self.get_section("Parameters").args.values():
arg.meta['indent'] = self.get_section("Parameters").indent
if "Returns" in docstr.sections:
for arg in self.get_section("Returns").args.values():
arg.meta['indent'] = self.get_section("Returns").indent
if "Yields" in docstr.sections:
for arg in self.get_section("Yields").args.values():
arg.meta['indent'] = self.get_section("Yields").indent
elif isinstance(docstr, string_types):
if template_order:
self.sections = self.TEMPLATE.copy()
else:
self.sections = OrderedDict()
self._parse(docstr)
def _parse(self, s):
"""Parse docstring into meta data
Parameters:
s (str): docstring
"""
raise NotImplementedError("_parse is an abstract method")
def format(self, top_indent):
"""Format docstring into a string
Parameters:
top_indent (str): indentation added to all but the first
lines
Returns:
str: properly formatted
"""
raise NotImplementedError("format is an abstract method")
def update_parameters(self, params):
""""""
raise NotImplementedError("update_parameters is an abstract method")
def update_return_type(self, ret_name, ret_type,
default_description="Description",
keyword="return"):
""""""
raise NotImplementedError("update_return_type is an abstract method")
def update_attributes(self, attribs, alpha_order=True):
"""
Args:
            attribs (OrderedDict): attribute Parameter objects keyed by their names
"""
raise NotImplementedError("update_attributes is an abstract method")
def update_exceptions(self, attribs, alpha_order=True):
"""
Args:
            attribs (OrderedDict): exception Parameter objects keyed by their names
"""
raise NotImplementedError("update_exceptions is an abstract method")
def add_dummy_returns(self, name, typ, description):
raise NotImplementedError("add_dummy_returns is an abstract method")
def finalize_section(self, heading, text):
"""
Args:
heading (type): Description
text (type): Description
"""
section = self.SECTION_STYLE(heading, text)
self.sections[section.alias] = section
def get_section(self, section_name):
if section_name in self.sections:
return self.sections[section_name]
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
return self.sections[alias]
raise KeyError("Section '{0}' not found".format(section_name))
def pop_section(self, section_name):
if section_name in self.sections:
return self.sections.pop(section_name)
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
return self.sections.pop(alias)
raise KeyError("Section '{0}' not found".format(section_name))
def insert_section(self, section_name, section):
if section.heading != section_name:
section.heading = section_name
self.sections[section_name] = section
def section_exists(self, section_name):
"""returns True iff section exists, and was finalized"""
sec = None
if section_name in self.sections:
sec = self.sections[section_name]
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
sec = self.sections[alias]
if sec is not None:
return True
return False
class NapoleonDocstring(Docstring): # pylint: disable=abstract-method
"""Styles understood by napoleon, aka. Google/Numpy"""
STYLE_NAME = "napoleon"
TEMPLATE = OrderedDict([("Summary", None),
("Parameters", None),
("Keyword Arguments", None),
("Returns", None),
("Yields", None),
("No Longer Returned", None),
("No Longer Yielded", None),
("Other Parameters", None),
("Deleted Parameters", None),
("Attributes", None),
("Deleted Attributes", None),
("Methods", None),
("Raises", None),
("No Longer Raises", None),
("Warns", None),
("See Also", None),
("Warning", None),
("Note", None),
("Notes", None),
("References", None),
("Example", None),
("Examples", None),
])
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip()
def _parse(self, s):
"""
Args:
s (type): Description
"""
logger.info("[NapoleonDocstring] starts parsing text")
self.trailing_newlines = count_trailing_newlines(s)
s = dedent_docstr(s)
sec_starts = [(m.start(), m.end(), m.string[m.start():m.end()])
for m in re.finditer(self.SECTION_RE, s, re.MULTILINE)]
sec_starts.insert(0, (0, 0, "Summary"))
sec_starts.append((len(s), len(s), ""))
for current_sec, next_sec in zip(sec_starts[:-1], sec_starts[1:]):
sec_name = self._extract_section_name(current_sec[2])
sec_body = s[current_sec[1]:next_sec[0]]
self.finalize_section(sec_name, sec_body)
@staticmethod
def _format_section_text(heading, body):
raise NotImplementedError("This is an abstract method")
def format(self, top_indent):
"""
Args:
top_indent (type): Description
"""
logger.info("[NapoleonDocstring] starts formatting")
s = ""
if self.section_exists("Summary"):
sec_text = self.get_section("Summary").text
if sec_text.strip():
s += with_bounding_newlines(sec_text, nleading=0, ntrailing=1)
for _, section in islice(self.sections.items(), 1, None):
if section is None:
continue
sec_body = indent_docstr(section.text, section.section_indent, n=0)
sec_text = self._format_section_text(section.heading, sec_body)
s += with_bounding_newlines(sec_text, nleading=1, ntrailing=1)
if self.trailing_newlines:
s = with_bounding_newlines(s, ntrailing=self.trailing_newlines)
s = indent_docstr(s, top_indent)
return s
def _update_section(self, params, sec_name, sec_alias=None,
del_prefix="Deleted ", alpha_order=False,
other_sections=()):
"""Update section to add / remove params
As a failsafe, params that are removed are placed in a
"Deleted ..." section
Args:
params (OrderedDict): dict of Parameter objects
sec_name (str): generic section name
            sec_alias (str): section name that appears in the docstring
            del_prefix (str): prefix for section that holds params that
                no longer exist.
            alpha_order (bool): whether or not to alphabetically sort
                the params
            other_sections (tuple): names of other sections whose params
                should be left alone
        """
if not sec_alias:
sec_alias = sec_name
if not self.section_exists(sec_name) and len(params) == 0:
return None
elif not self.section_exists(sec_name):
self.finalize_section(sec_alias, "")
# put together which other sections exist so we can use them to
# exclude params that exist in them
_other = []
for _secname in other_sections:
if self.section_exists(_secname):
_other.append(self.get_section(_secname))
other_sections = _other
if alpha_order:
sorted_params = OrderedDict()
for k in sorted(list(params.keys()), key=str.lower):
sorted_params[k] = params[k]
params = sorted_params
current_dict = self.get_section(sec_name).args
# go through params in the order of the function declaration
# and cherry-pick from current_dict if there's already a description
# for that parameter
tags_seen = dict()
new = OrderedDict()
for name, param in params.items():
if name in current_dict:
def_param = param
param = current_dict.pop(name)
if param.tag in tags_seen:
param = None
else:
tags_seen[param.tag] = True
# update the type if annotated
if def_param.annotated:
param.types = def_param.types
else:
# if param is in one of the 'other sections', then don't
# worry about it
for sec in other_sections:
if name in sec.args:
# update the type if the annotated
if param.annotated:
sec.args[name].types = param.types
# now ignore it
param = None
if param:
new[name] = param
        # add description only parameters back in; iterate over a copy
        # since entries are popped from current_dict inside the loop
        for key, param in list(current_dict.items()):
if param.descr_only:
# param.description = '\n' + param.description
new[key] = current_dict.pop(key)
# not sure when this guy gets created
if '' in current_dict:
del current_dict['']
        # go through params that are no longer in the arguments list and
# move them from the Parameters section of the docstring to the
# deleted parameters section
if len(current_dict):
del_sec_name = del_prefix + sec_name
del_sec_alias = del_prefix + sec_alias
logger.warn("killing parameters named: {}".format(current_dict.keys()))
            # TODO: put a switch here for other behavior?
if not self.section_exists(self.SECTION_STYLE.resolve_alias(del_sec_name)):
self.finalize_section(del_sec_name, "")
deled_params = self.get_section(del_sec_name)
deleted_tags = dict()
for key, val in current_dict.items():
if key in deled_params.args:
logger.warn("Stronger Warning: Killing old deleted param: "
"'{0}'".format(key))
val.names.remove(key)
if val.tag in deleted_tags:
deleted_tags[val.tag].names.append(key)
else:
new_val = Parameter([key], val.types, val.description)
deleted_tags[val.tag] = new_val
deled_params.args[key] = new_val
if len(new) == 0:
self.sections[sec_name] = None
else:
self.sections[sec_name].args = new
def update_parameters(self, params):
"""
Args:
            params (OrderedDict): Parameter objects keyed by their names
"""
logger.info("[NapoleonDocstring] update parameters")
        other_sections = ['Other Parameters', 'Keyword Arguments']
self._update_section(params, "Parameters", self.PREFERRED_PARAMS_ALIAS,
other_sections=other_sections)
def update_return_type(self, ret_name, ret_type,
default_description="Description",
keyword="return", del_prefix="No Longer "):
""""""
logger.info("[NapoleonDocstring] update return type")
if keyword == "yield":
sec_name = "Yields"
elif keyword == "return":
sec_name = "Returns"
        else:
            logger.debug("Unknown return keyword: '{}'".format(keyword))
            for std_ret_name in ("Yields", "Returns"):
                if self.section_exists(std_ret_name):
                    del_sec_name = del_prefix + std_ret_name
                    del_sec_alias = self.SECTION_STYLE.resolve_alias(del_sec_name)
                    if not self.section_exists(del_sec_alias):
                        self.finalize_section(del_sec_alias, "")
                    del_sec = self.get_section(del_sec_alias)
                    sec = self.pop_section(std_ret_name)
                    del_sec.args = sec.args
                    return
            # unknown keyword and no existing return-ish section; bail out
            # here since sec_name was never set
            return
if not self.section_exists(sec_name):
            # see if a section exists from another keyword, i.e., maybe
# this function used to return, but now it yields
for std_ret_name in ("Yields", "Returns"):
if self.section_exists(std_ret_name):
                    # necessary to completely recreate the section
# in order to use the right parser and formatter
logger.debug("old return section exists : '{}'".format(std_ret_name))
old_sec = self.pop_section(std_ret_name)
self.finalize_section(sec_name, "")
new_sec = self.get_section(sec_name)
new_sec.args = old_sec.args
self.insert_section(sec_name, new_sec)
break
if self.section_exists(sec_name):
sec = self.get_section(sec_name)
if sec.args and ret_type:
p0 = next(iter(sec.args.values()))
if p0.descr_only:
p0.description = ret_type
elif p0.types:
p0.types = ret_type
elif p0.names:
p0.names = [ret_type]
elif ret_name or ret_type:
description = default_description
sec.args = OrderedDict()
if ret_name:
sec.args[ret_name] = Parameter([ret_name], ret_type, description)
else:
sec.args[ret_type] = Parameter([ret_type], "", description)
else:
# and i ask myself, how did i get here?
pass
else:
self.finalize_section(sec_name, "")
sec = self.get_section(sec_name)
ret_type = ret_type if ret_type != "" else "${NUMBER:TYPE}"
sec.args = OrderedDict()
sec.args[ret_type] = Parameter([ret_type], "", default_description)
def update_attributes(self, attribs, alpha_order=True):
"""
Args:
params (OrderedDict): params objects keyed by their names
"""
logger.info("[NapoleonDocstring] update attributes")
self._update_section(attribs, "Attributes", alpha_order=alpha_order)
def update_exceptions(self, attribs, alpha_order=True):
"""
Args:
params (OrderedDict): params objects keyed by their names
"""
logger.info("[NapoleonDocstring] update exceptions")
self._update_section(attribs, "Raises", del_prefix="No Longer ",
alpha_order=alpha_order)
def add_dummy_returns(self, name, typ, description):
# No longer used??
if not self.section_exists("Returns"):
sec = self.SECTION_STYLE("Returns")
if name:
sec.args = {name: Parameter([name], typ, description)}
else:
sec.args = {typ: Parameter([typ], "", description)}
self.sections["Returns"] = sec
class GoogleDocstring(NapoleonDocstring):
""""""
STYLE_NAME = "google"
SECTION_STYLE = GoogleSection
SECTION_RE = r"^[A-Za-z0-9][A-Za-z0-9 \t]*:\s*$\r?\n?"
PREFERRED_PARAMS_ALIAS = "Args"
@classmethod
def detect_style(cls, docstr):
""""""
m = re.search(cls.SECTION_RE, docstr, re.MULTILINE)
return m is not None
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip().rstrip(':').rstrip()
@staticmethod
def _format_section_text(heading, body):
return "{0}:\n{1}".format(heading, body)
class NumpyDocstring(NapoleonDocstring):
""""""
STYLE_NAME = "numpy"
SECTION_STYLE = NumpySection
SECTION_RE = r"^([A-Za-z0-9][A-Za-z0-9 \t]*)\s*\n-+\s*?$\r?\n?"
PREFERRED_PARAMS_ALIAS = "Parameters"
@classmethod
def detect_style(cls, docstr):
""""""
m = re.search(cls.SECTION_RE, docstr, re.MULTILINE)
return m is not None
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip().rstrip('-').rstrip()
@staticmethod
def _format_section_text(heading, body):
return "{0}\n{1}\n{2}".format(heading, "-" * len(heading), body)
STYLE_LOOKUP = OrderedDict([('numpy', NumpyDocstring),
('google', GoogleDocstring)])
##
## EOF
##
|
indent_docstr
|
Add common indentation to all lines except first
Args:
s (str): docstring starting at indentation level 0
indent (str): text used for indentation, in practice
this will be the level of the declaration + 1
n (int): don't indent first n lines
    trim (bool): trim whitespace (' \t') out of blank lines
Returns:
s with common indentation applied
|
# -*- coding: utf-8 -*-
"""Docstring Parsers/Formatters"""
# TODO: break this module up into smaller pieces
import sys
import re
from textwrap import dedent
from collections import OrderedDict
from itertools import islice
from .autodocstring_logging import logger
PY3k = sys.version_info[0] == 3
if PY3k:
string_types = str,
else:
string_types = basestring, # pylint: disable=undefined-variable
def make_docstring_obj(docstr, default="google", template_order=False):
"""Detect docstring style and create a Docstring object
Parameters:
docstr (str): source docstring
default (str, class): 'google', 'numpy' or subclass
of Docstring
template_order (bool, optional): iff True, reorder the
sections to match the order they appear in the template
Returns:
subclass of Docstring
"""
typ = detect_style(docstr)
logger.info("[make_docstring_obj] from {} to {}"
"".format(typ.__name__ if typ is not None else None, default.__name__))
if typ is None:
if issubclass(default, Docstring):
typ = default
else:
typ = STYLE_LOOKUP[default.lower()]
return typ(docstr, template_order=template_order)
def detect_style(docstr):
"""Detect docstr style from existing docstring
Parameters:
docstr (str): docstring whose style we want to know
Returns:
class: one of [GoogleDocstring, NumpyDocstring, None]; None
means no match
"""
docstr = dedent_docstr(docstr)
for c in STYLE_LOOKUP.values():
if c.detect_style(docstr):
return c
return None
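# A minimal usage sketch (added for illustration; not part of the original
# module). detect_style() tries each class in STYLE_LOOKUP in order, so a
# "Heading:" line reads as Google style while a dash-underlined heading
# reads as numpy style.
def _demo_detect_style():
    google_doc = "Summary\n\nArgs:\n    x (int): a thing\n"
    numpy_doc = "Summary\n\nParameters\n----------\nx : int\n    a thing\n"
    assert detect_style(google_doc) is GoogleDocstring
    assert detect_style(numpy_doc) is NumpyDocstring
    assert detect_style("no sections here") is None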
def dedent_docstr(s, n=1):
"""Dedent all lines except first n lines
Args:
s (type): some text to dedent
n (int): number of lines to skip, (n == 0 is a normal dedent,
n == 1 is useful for whole docstrings)
"""
lines = s.splitlines(keepends=True)
if lines:
first_n_lines = "".join([l.lstrip(' \t') for l in lines[:n]])
dedented = dedent("".join(lines[n:]))
return first_n_lines + dedented
else:
return ""
def dedent_verbose(s, n=1):
new = dedent_docstr(s, n=n)
s_split = s.splitlines(keepends=True)
new_split = new.splitlines(keepends=True)
i, ind = 0, -1
for i in range(n, len(s_split)):
if s_split[i].strip():
ind = s_split[i].find(new_split[i])
break
if ind >= 0:
indent = s_split[i][:ind]
else:
indent = ""
return indent, new
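# Usage sketch (illustrative only): dedent_verbose() also reports the
# indentation it removed, which the section formatters reuse later.
def _demo_dedent_verbose():
    indent, text = dedent_verbose("first\n    second\n")
    assert indent == "    "
    assert text == "first\nsecond\n"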
# MASKED: indent_docstr function (lines 96-115)
def count_leading_newlines(s):
"""count number of leading newlines
this includes newlines that are separated by other whitespace
"""
return s[:-len(s.lstrip())].count('\n')
def count_trailing_newlines(s):
"""count number of trailing newlines
this includes newlines that are separated by other whitespace
"""
return s[len(s.rstrip()):].count('\n')
def with_bounding_newlines(s, nleading=0, ntrailing=0, nl='\n'):
"""return s with at least # leading and # trailing newlines
this includes newlines that are separated by other whitespace
"""
return "{0}{1}{2}".format(nl * (nleading - count_leading_newlines(s)),
s,
nl * (ntrailing - count_trailing_newlines(s)))
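# Usage sketch (illustrative only): newlines already present count toward
# the requested minimum, so applying the function twice changes nothing.
def _demo_with_bounding_newlines():
    assert with_bounding_newlines("x", nleading=1, ntrailing=2) == "\nx\n\n"
    assert with_bounding_newlines("\nx\n\n", nleading=1, ntrailing=2) == "\nx\n\n"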
def strip_newlines(s, nleading=0, ntrailing=0):
    """strip at most nleading and ntrailing newlines from s"""
    # check for '\r\n' before '\n' (a single-char comparison can never
    # equal the two-char '\r\n'), and use slices so empty strings are safe
    for _ in range(nleading):
        if s.lstrip(' \t')[:2] == '\r\n':
            s = s.lstrip(' \t')[2:]
        elif s.lstrip(' \t')[:1] == '\n':
            s = s.lstrip(' \t')[1:]
    for _ in range(ntrailing):
        if s.rstrip(' \t')[-2:] == '\r\n':
            s = s.rstrip(' \t')[:-2]
        elif s.rstrip(' \t')[-1:] == '\n':
            s = s.rstrip(' \t')[:-1]
    return s
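# Usage sketch (illustrative only): at most the requested number of
# newlines is stripped from either end; interior newlines are untouched.
def _demo_strip_newlines():
    assert strip_newlines("\n\nbody\n", nleading=1) == "\nbody\n"
    assert strip_newlines("body\n\n", ntrailing=2) == "body"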
class Parameter(object):
""""""
names = None
types = None
description = None
tag = None
descr_only = None
meta = None
def __init__(self, names, types, description, tag=None, descr_only=False,
annotated=False, **kwargs):
"""
Args:
names (list): list of names
types (str): string describing data types
description (str): description text
tag (int): some meaningful index? not fleshed out yet
descr_only (bool): only description is useful
**kwargs: Description
"""
assert names is not None
if description is None:
description = ""
self.names = names
self.types = types
self.description = description
self.tag = tag
self.descr_only = descr_only
self.annotated = annotated
self.meta = kwargs
class Section(object):
""""""
ALIASES = {}
PARSERS = {}
is_formatted = None
args = None
args_parser = None
args_formatter = None
heading = None
alias = None
_text = None
section_indent = ""
indent = " "
meta = None
formatter_override = None
def __init__(self, heading, text="", indent=None, **kwargs):
"""
Args:
heading (str): heading of the section (should be title case)
text (str, optional): section text
indent (str, optional): used by some formatters
"""
self.heading = heading
self.alias = self.resolve_alias(heading)
if self.alias in self.PARSERS:
parser, formatter = self.PARSERS[self.alias]
self.args_parser = parser
self.args_formatter = formatter
self.is_formatted = True
else:
self.is_formatted = False
if indent is not None:
self.indent = indent
self.text = text
self.meta = kwargs
logger.debug("create section '{}' ({}) with args : '{}'".format(self.heading,
self.alias,
self.args))
@classmethod
def from_section(cls, sec):
new_sec = cls(sec.alias)
new_sec._text = sec._text # pylint: disable=protected-access
# when changing styles, the indentation should change to better fit
# the new style
# new_sec.section_indent = sec.section_indent
# new_sec.indent = sec.indent
if hasattr(sec, "args"):
new_sec.args = sec.args
return new_sec
@classmethod
def resolve_alias(cls, heading):
""""""
titled_heading = heading.title()
try:
return cls.ALIASES[titled_heading]
except KeyError:
return heading
@property
def text(self):
""""""
if self.formatter_override is not None:
s = self.formatter_override(self) # pylint: disable=not-callable
elif self.args_formatter is not None:
s = self.args_formatter(self)
else:
s = self._text
return s
@text.setter
def text(self, val):
""""""
val = strip_newlines(val, ntrailing=1)
if self.args_parser is not None:
self.args = self.args_parser(self, val)
else:
section_indent, self._text = dedent_verbose(val, n=0)
# don't overwrite section indent if val isn't indented
if section_indent:
self.section_indent = section_indent
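# Usage sketch (illustrative only): resolve_alias() title-cases the heading
# before the lookup, so capitalization of a section heading does not matter.
def _demo_resolve_alias():
    assert NapoleonSection.resolve_alias("args") == "Parameters"
    assert NapoleonSection.resolve_alias("Return") == "Returns"
    assert Section.resolve_alias("Whatever") == "Whatever"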
class NapoleonSection(Section):
""""""
ALIASES = {"Args": "Parameters",
"Arguments": "Parameters",
"Deleted Args": "Deleted Parameters",
"Deleted Arguments": "Deleted Parameters",
"Other Args": "Other Parameters",
"Other Arguments": "Other Parameters",
"Keyword Args": "Keyword Arguments",
"Return": "Returns",
"Yield": "Yields",
"No Longer Returns": "No Longer Returned",
"No Longer Yields": "No Longer Yielded",
"Warnings": "Warning"
}
def is_return_section(self):
return self.heading and self.heading.lower() in ('return', 'returns',
'yield', 'yields')
def param_parser_common(self, text):
# NOTE: there will be some tricky business if there is a
# section break done by "resuming unindented text"
param_list = []
param_dict = OrderedDict()
text = dedent_docstr(text, 0)
_r = r"^\S[^\r\n]*(?:\n[^\S\n]+\S[^\r\n]*|\n)*"
param_blocks = re.findall(_r, text, re.MULTILINE)
for i, block in enumerate(param_blocks):
param = self.finalize_param(block, len(param_list))
param_list.append(param)
if self.is_return_section():
param.names = [", ".join(param.names)]
param_dict[i] = param
else:
for name in param.names:
param_dict[name] = param
return param_dict
class GoogleSection(NapoleonSection):
""""""
section_indent = " "
indent = " "
@staticmethod
def finalize_param(s, tag):
"""
Args:
s (type): Description
tag (int): index of param? not fleshed out yet
"""
meta = {}
_r = r"([^,\s]+(?:\s*,\s*[^,\s]+)*\s*)(?:\((.*)\))?\s*:\s*(.*)"
m = re.match(_r, s, re.DOTALL | re.MULTILINE)
if m:
names, typ, descr = m.groups()
names = [n.strip() for n in names.split(',')]
meta['indent'], descr = dedent_verbose(descr, n=1)
descr_only = False
else:
names = ["{0}".format(tag)]
typ = ""
descr = s
descr_only = True
return Parameter(names, typ, descr, tag=tag, descr_only=descr_only, **meta)
def param_parser(self, text):
logger.info("[GoogleSection] section '{}' starts parsing".format(self.alias))
return self.param_parser_common(text)
def param_formatter(self):
""""""
logger.info("[GoogleSection] section '{}' starts formatting".format(self.alias))
s = ""
for param in self.args.values():
if param.descr_only:
s += with_bounding_newlines(param.description, ntrailing=1)
else:
if len(param.names) > 1:
logger.warn("section '{}' : Google docstrings don't allow > 1 "
"parameter per description".format(self.alias))
p = "{0}".format(", ".join(param.names))
if param.types:
types = param.types.strip()
if types:
p = "{0} ({1})".format(p, types)
if param.description:
desc = indent_docstr(param.description,
param.meta.get("indent", self.indent))
p = "{0}: {1}".format(p, desc)
s += with_bounding_newlines(p, ntrailing=1)
return s
PARSERS = {"Parameters": (param_parser,
param_formatter),
"Other Parameters": (param_parser,
param_formatter),
"Deleted Parameters": (param_parser,
param_formatter),
"Keyword Arguments": (param_parser,
param_formatter),
"Attributes": (param_parser,
param_formatter),
"Deleted Attributes": (param_parser,
param_formatter),
"Raises": (param_parser,
param_formatter),
"No Longer Raises": (param_parser,
param_formatter),
"Returns": (param_parser,
param_formatter),
"Yields": (param_parser,
param_formatter),
"No Longer Returned": (param_parser,
param_formatter),
"No Longer Yielded": (param_parser,
param_formatter),
}
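# Usage sketch (illustrative only): finalize_param() turns one Google-style
# "name (type): description" block into a Parameter object.
def _demo_google_finalize_param():
    p = GoogleSection.finalize_param("x (int): a thing", 0)
    assert p.names == ["x"] and p.types == "int"
    assert p.description == "a thing"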
class NumpySection(NapoleonSection):
""""""
indent = " "
@staticmethod
def finalize_param(s, i):
meta = {}
_r = r"\s*([^,\s]+(?:\s*,\s*[^,\s]+)*)\s*(?::\s*(.*?))?[^\S\n]*?\n(\s+.*)"
m = re.match(_r, s, re.DOTALL)
if m:
names, typ, desc = m.groups()
# FIXME hack, name for numpy parameters is always a list of names
# to support the multiple parameters per description option in
# numpy docstrings
names = [n.strip() for n in names.split(',')]
meta['indent'], descr = dedent_verbose(desc, 0)
descr_only = False
else:
names = ["{0}".format(i)]
typ = ""
descr = s
descr_only = True
return Parameter(names, typ, descr, tag=i, descr_only=descr_only, **meta)
def param_parser(self, text):
logger.info("[NumpySection] section '{}' starts parsing".format(self.alias))
return self.param_parser_common(text)
def param_formatter(self):
""""""
# NOTE: there will be some tricky business if there is a
# section break done by "resuming unindented text"
logger.info("[NumpySection] section '{}' starts formatting".format(self.alias))
s = ""
# already_seen = {}
for param in self.args.values():
if param.descr_only:
s += with_bounding_newlines(param.description, ntrailing=1)
else:
p = "{0}".format(", ".join(param.names))
if param.types:
types = param.types.strip()
if types:
p = "{0} : {1}".format(p, param.types.strip())
p = with_bounding_newlines(p, ntrailing=1)
if param.description:
p += indent_docstr(param.description,
param.meta.get("indent", self.indent),
n=0)
s += with_bounding_newlines(p, ntrailing=1)
return s
PARSERS = {"Parameters": (param_parser,
param_formatter),
"Other Parameters": (param_parser,
param_formatter),
"Deleted Parameters": (param_parser,
param_formatter),
"Keyword Arguments": (param_parser,
param_formatter),
"Attributes": (param_parser,
param_formatter),
"Deleted Attributes": (param_parser,
param_formatter),
"Raises": (param_parser,
param_formatter),
"No Longer Raises": (param_parser,
param_formatter),
"Returns": (param_parser,
param_formatter),
"Yields": (param_parser,
param_formatter),
"No Longer Returned": (param_parser,
param_formatter),
"No Longer Yielded": (param_parser,
param_formatter),
}
class Docstring(object):
"""Handle parsing / modifying / writing docstrings"""
STYLE_NAME = "none"
SECTION_STYLE = Section
TEMPLATE = OrderedDict([("Summary", None)])
PREFERRED_PARAMS_ALIAS = "Args"
sections = None
trailing_newlines = None
def __init__(self, docstr, template_order=False):
"""
Parameters:
docstr (Docstring or str): some existing docstring
template_order (bool, optional): iff True, reorder the
sections to match the order they appear in the template
"""
if isinstance(docstr, Docstring):
self.sections = docstr.sections
self.trailing_newlines = docstr.trailing_newlines
if not isinstance(docstr, type(self)):
# fixme, this is kinda hacky
make_new_sec = self.SECTION_STYLE.from_section
for sec_name, sec in docstr.sections.items():
                    # when the section should not exist
# i.e. when a section was generated, but isn't needed anymore
# e.g. when there isn't any exception raised
if sec:
docstr.sections[sec_name] = make_new_sec(sec)
else:
# deleting section that shouldn't be here
# including those generated with template_order=True
del docstr.sections[sec_name]
# ok, this way of changing indentation is a thunder hack
if "Parameters" in docstr.sections:
self.get_section("Parameters").heading = self.PREFERRED_PARAMS_ALIAS
for arg in self.get_section("Parameters").args.values():
arg.meta['indent'] = self.get_section("Parameters").indent
if "Returns" in docstr.sections:
for arg in self.get_section("Returns").args.values():
arg.meta['indent'] = self.get_section("Returns").indent
if "Yields" in docstr.sections:
for arg in self.get_section("Yields").args.values():
arg.meta['indent'] = self.get_section("Yields").indent
elif isinstance(docstr, string_types):
if template_order:
self.sections = self.TEMPLATE.copy()
else:
self.sections = OrderedDict()
self._parse(docstr)
def _parse(self, s):
"""Parse docstring into meta data
Parameters:
s (str): docstring
"""
raise NotImplementedError("_parse is an abstract method")
def format(self, top_indent):
"""Format docstring into a string
Parameters:
top_indent (str): indentation added to all but the first
lines
Returns:
str: properly formatted
"""
raise NotImplementedError("format is an abstract method")
def update_parameters(self, params):
""""""
raise NotImplementedError("update_parameters is an abstract method")
def update_return_type(self, ret_name, ret_type,
default_description="Description",
keyword="return"):
""""""
raise NotImplementedError("update_return_type is an abstract method")
def update_attributes(self, attribs, alpha_order=True):
"""
Args:
params (OrderedDict): params objects keyed by their names
"""
raise NotImplementedError("update_attributes is an abstract method")
def update_exceptions(self, attribs, alpha_order=True):
"""
Args:
params (OrderedDict): params objects keyed by their names
"""
raise NotImplementedError("update_exceptions is an abstract method")
def add_dummy_returns(self, name, typ, description):
raise NotImplementedError("add_dummy_returns is an abstract method")
def finalize_section(self, heading, text):
"""
Args:
heading (type): Description
text (type): Description
"""
section = self.SECTION_STYLE(heading, text)
self.sections[section.alias] = section
def get_section(self, section_name):
if section_name in self.sections:
return self.sections[section_name]
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
return self.sections[alias]
raise KeyError("Section '{0}' not found".format(section_name))
def pop_section(self, section_name):
if section_name in self.sections:
return self.sections.pop(section_name)
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
return self.sections.pop(alias)
raise KeyError("Section '{0}' not found".format(section_name))
def insert_section(self, section_name, section):
if section.heading != section_name:
section.heading = section_name
self.sections[section_name] = section
def section_exists(self, section_name):
"""returns True iff section exists, and was finalized"""
sec = None
if section_name in self.sections:
sec = self.sections[section_name]
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
sec = self.sections[alias]
if sec is not None:
return True
return False
class NapoleonDocstring(Docstring): # pylint: disable=abstract-method
"""Styles understood by napoleon, aka. Google/Numpy"""
STYLE_NAME = "napoleon"
TEMPLATE = OrderedDict([("Summary", None),
("Parameters", None),
("Keyword Arguments", None),
("Returns", None),
("Yields", None),
("No Longer Returned", None),
("No Longer Yielded", None),
("Other Parameters", None),
("Deleted Parameters", None),
("Attributes", None),
("Deleted Attributes", None),
("Methods", None),
("Raises", None),
("No Longer Raises", None),
("Warns", None),
("See Also", None),
("Warning", None),
("Note", None),
("Notes", None),
("References", None),
("Example", None),
("Examples", None),
])
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip()
def _parse(self, s):
"""
Args:
s (type): Description
"""
logger.info("[NapoleonDocstring] starts parsing text")
self.trailing_newlines = count_trailing_newlines(s)
s = dedent_docstr(s)
sec_starts = [(m.start(), m.end(), m.string[m.start():m.end()])
for m in re.finditer(self.SECTION_RE, s, re.MULTILINE)]
sec_starts.insert(0, (0, 0, "Summary"))
sec_starts.append((len(s), len(s), ""))
for current_sec, next_sec in zip(sec_starts[:-1], sec_starts[1:]):
sec_name = self._extract_section_name(current_sec[2])
sec_body = s[current_sec[1]:next_sec[0]]
self.finalize_section(sec_name, sec_body)
@staticmethod
def _format_section_text(heading, body):
raise NotImplementedError("This is an abstract method")
def format(self, top_indent):
"""
Args:
top_indent (type): Description
"""
logger.info("[NapoleonDocstring] starts formatting")
s = ""
if self.section_exists("Summary"):
sec_text = self.get_section("Summary").text
if sec_text.strip():
s += with_bounding_newlines(sec_text, nleading=0, ntrailing=1)
for _, section in islice(self.sections.items(), 1, None):
if section is None:
continue
sec_body = indent_docstr(section.text, section.section_indent, n=0)
sec_text = self._format_section_text(section.heading, sec_body)
s += with_bounding_newlines(sec_text, nleading=1, ntrailing=1)
if self.trailing_newlines:
s = with_bounding_newlines(s, ntrailing=self.trailing_newlines)
s = indent_docstr(s, top_indent)
return s
def _update_section(self, params, sec_name, sec_alias=None,
del_prefix="Deleted ", alpha_order=False,
other_sections=()):
"""Update section to add / remove params
As a failsafe, params that are removed are placed in a
"Deleted ..." section
Args:
params (OrderedDict): dict of Parameter objects
sec_name (str): generic section name
            sec_alias (str): section name that appears in the docstring
del_prefix (str): prefix for section that holds params that
no longer exist.
alpha_order (bool): whether or not to alphabetically sort
the params
"""
if not sec_alias:
sec_alias = sec_name
if not self.section_exists(sec_name) and len(params) == 0:
return None
elif not self.section_exists(sec_name):
self.finalize_section(sec_alias, "")
# put together which other sections exist so we can use them to
# exclude params that exist in them
_other = []
for _secname in other_sections:
if self.section_exists(_secname):
_other.append(self.get_section(_secname))
other_sections = _other
if alpha_order:
sorted_params = OrderedDict()
for k in sorted(list(params.keys()), key=str.lower):
sorted_params[k] = params[k]
params = sorted_params
current_dict = self.get_section(sec_name).args
# go through params in the order of the function declaration
# and cherry-pick from current_dict if there's already a description
# for that parameter
tags_seen = dict()
new = OrderedDict()
for name, param in params.items():
if name in current_dict:
def_param = param
param = current_dict.pop(name)
if param.tag in tags_seen:
param = None
else:
tags_seen[param.tag] = True
# update the type if annotated
if def_param.annotated:
param.types = def_param.types
else:
# if param is in one of the 'other sections', then don't
# worry about it
for sec in other_sections:
if name in sec.args:
                        # update the type if the param is annotated
if param.annotated:
sec.args[name].types = param.types
# now ignore it
param = None
if param:
new[name] = param
        # add description-only parameters back in; iterate over a copy
        # because entries are popped from current_dict inside the loop
        for key, param in list(current_dict.items()):
            if param.descr_only:
                # param.description = '\n' + param.description
                new[key] = current_dict.pop(key)
# not sure when this guy gets created
if '' in current_dict:
del current_dict['']
        # go through params that are no longer in the arguments list and
# move them from the Parameters section of the docstring to the
# deleted parameters section
if len(current_dict):
del_sec_name = del_prefix + sec_name
del_sec_alias = del_prefix + sec_alias
logger.warn("killing parameters named: {}".format(current_dict.keys()))
            # TODO: put a switch here for other behavior?
if not self.section_exists(self.SECTION_STYLE.resolve_alias(del_sec_name)):
self.finalize_section(del_sec_name, "")
deled_params = self.get_section(del_sec_name)
deleted_tags = dict()
for key, val in current_dict.items():
if key in deled_params.args:
logger.warn("Stronger Warning: Killing old deleted param: "
"'{0}'".format(key))
val.names.remove(key)
if val.tag in deleted_tags:
deleted_tags[val.tag].names.append(key)
else:
new_val = Parameter([key], val.types, val.description)
deleted_tags[val.tag] = new_val
deled_params.args[key] = new_val
if len(new) == 0:
self.sections[sec_name] = None
else:
self.sections[sec_name].args = new
def update_parameters(self, params):
"""
Args:
params (OrderedDict): params objects keyed by their names
"""
logger.info("[NapoleonDocstring] update parameters")
        other_sections = ['Other Parameters', 'Keyword Arguments']
self._update_section(params, "Parameters", self.PREFERRED_PARAMS_ALIAS,
other_sections=other_sections)
def update_return_type(self, ret_name, ret_type,
default_description="Description",
keyword="return", del_prefix="No Longer "):
""""""
logger.info("[NapoleonDocstring] update return type")
if keyword == "yield":
sec_name = "Yields"
elif keyword == "return":
sec_name = "Returns"
        else:
            logger.debug("Unknown return keyword: '{}'".format(keyword))
            for std_ret_name in ("Yields", "Returns"):
                if self.section_exists(std_ret_name):
                    del_sec_name = del_prefix + std_ret_name
                    del_sec_alias = self.SECTION_STYLE.resolve_alias(del_sec_name)
                    if not self.section_exists(del_sec_alias):
                        self.finalize_section(del_sec_alias, "")
                    del_sec = self.get_section(del_sec_alias)
                    sec = self.pop_section(std_ret_name)
                    del_sec.args = sec.args
                    return
            # unknown keyword and no existing return-ish section; bail out
            # here since sec_name was never set
            return
if not self.section_exists(sec_name):
            # see if a section exists from another keyword, i.e., maybe
# this function used to return, but now it yields
for std_ret_name in ("Yields", "Returns"):
if self.section_exists(std_ret_name):
                    # necessary to completely recreate the section
# in order to use the right parser and formatter
logger.debug("old return section exists : '{}'".format(std_ret_name))
old_sec = self.pop_section(std_ret_name)
self.finalize_section(sec_name, "")
new_sec = self.get_section(sec_name)
new_sec.args = old_sec.args
self.insert_section(sec_name, new_sec)
break
if self.section_exists(sec_name):
sec = self.get_section(sec_name)
if sec.args and ret_type:
p0 = next(iter(sec.args.values()))
if p0.descr_only:
p0.description = ret_type
elif p0.types:
p0.types = ret_type
elif p0.names:
p0.names = [ret_type]
elif ret_name or ret_type:
description = default_description
sec.args = OrderedDict()
if ret_name:
sec.args[ret_name] = Parameter([ret_name], ret_type, description)
else:
sec.args[ret_type] = Parameter([ret_type], "", description)
else:
# and i ask myself, how did i get here?
pass
else:
self.finalize_section(sec_name, "")
sec = self.get_section(sec_name)
ret_type = ret_type if ret_type != "" else "${NUMBER:TYPE}"
sec.args = OrderedDict()
sec.args[ret_type] = Parameter([ret_type], "", default_description)
def update_attributes(self, attribs, alpha_order=True):
"""
Args:
params (OrderedDict): params objects keyed by their names
"""
logger.info("[NapoleonDocstring] update attributes")
self._update_section(attribs, "Attributes", alpha_order=alpha_order)
def update_exceptions(self, attribs, alpha_order=True):
"""
Args:
params (OrderedDict): params objects keyed by their names
"""
logger.info("[NapoleonDocstring] update exceptions")
self._update_section(attribs, "Raises", del_prefix="No Longer ",
alpha_order=alpha_order)
def add_dummy_returns(self, name, typ, description):
# No longer used??
if not self.section_exists("Returns"):
sec = self.SECTION_STYLE("Returns")
if name:
sec.args = {name: Parameter([name], typ, description)}
else:
sec.args = {typ: Parameter([typ], "", description)}
self.sections["Returns"] = sec
class GoogleDocstring(NapoleonDocstring):
""""""
STYLE_NAME = "google"
SECTION_STYLE = GoogleSection
SECTION_RE = r"^[A-Za-z0-9][A-Za-z0-9 \t]*:\s*$\r?\n?"
PREFERRED_PARAMS_ALIAS = "Args"
@classmethod
def detect_style(cls, docstr):
""""""
m = re.search(cls.SECTION_RE, docstr, re.MULTILINE)
return m is not None
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip().rstrip(':').rstrip()
@staticmethod
def _format_section_text(heading, body):
return "{0}:\n{1}".format(heading, body)
class NumpyDocstring(NapoleonDocstring):
""""""
STYLE_NAME = "numpy"
SECTION_STYLE = NumpySection
SECTION_RE = r"^([A-Za-z0-9][A-Za-z0-9 \t]*)\s*\n-+\s*?$\r?\n?"
PREFERRED_PARAMS_ALIAS = "Parameters"
@classmethod
def detect_style(cls, docstr):
""""""
m = re.search(cls.SECTION_RE, docstr, re.MULTILINE)
return m is not None
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip().rstrip('-').rstrip()
@staticmethod
def _format_section_text(heading, body):
return "{0}\n{1}\n{2}".format(heading, "-" * len(heading), body)
STYLE_LOOKUP = OrderedDict([('numpy', NumpyDocstring),
('google', GoogleDocstring)])
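# End-to-end usage sketch (illustrative only, and assuming the masked
# indent_docstr above is restored): parse a Google-style docstring, then
# re-emit it in numpy style by building a NumpyDocstring from the parsed
# GoogleDocstring.
def _demo_style_conversion():
    src = "Summary\n\nArgs:\n    x (int): a thing\n"
    gdoc = make_docstring_obj(src)
    ndoc = NumpyDocstring(gdoc)
    out = ndoc.format("    ")
    assert "Parameters" in out and "x : int" in out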
##
## EOF
##
|
def indent_docstr(s, indent, n=1, trim=True):
"""Add common indentation to all lines except first
Args:
s (str): docstring starting at indentation level 0
indent (str): text used for indentation, in practice
this will be the level of the declaration + 1
n (int): don't indent first n lines
trim (bool): trim whitespace (' \t') out of blank lines
Returns:
s with common indentation applied
"""
lines = s.splitlines(keepends=True)
for i in range(n, len(lines)):
if lines[i].strip() or not trim:
lines[i] = "{0}{1}".format(indent, lines[i])
else:
lines[i] = lines[i].strip(' \t')
return "".join(lines)
| 96
| 115
|
# -*- coding: utf-8 -*-
"""Docstring Parsers/Formatters"""
# TODO: break this module up into smaller pieces
import sys
import re
from textwrap import dedent
from collections import OrderedDict
from itertools import islice
from .autodocstring_logging import logger
PY3k = sys.version_info[0] == 3
if PY3k:
string_types = str,
else:
string_types = basestring, # pylint: disable=undefined-variable
def make_docstring_obj(docstr, default="google", template_order=False):
"""Detect docstring style and create a Docstring object
Parameters:
docstr (str): source docstring
default (str, class): 'google', 'numpy' or subclass
of Docstring
template_order (bool, optional): iff True, reorder the
sections to match the order they appear in the template
Returns:
subclass of Docstring
"""
typ = detect_style(docstr)
    logger.info("[make_docstring_obj] from {} to {}"
                "".format(typ.__name__ if typ is not None else None,
                          default if isinstance(default, string_types)
                          else default.__name__))
    if typ is None:
        if isinstance(default, type) and issubclass(default, Docstring):
            typ = default
        else:
            typ = STYLE_LOOKUP[default.lower()]
return typ(docstr, template_order=template_order)
def detect_style(docstr):
"""Detect docstr style from existing docstring
Parameters:
docstr (str): docstring whose style we want to know
Returns:
class: one of [GoogleDocstring, NumpyDocstring, None]; None
means no match
"""
docstr = dedent_docstr(docstr)
for c in STYLE_LOOKUP.values():
if c.detect_style(docstr):
return c
return None
def dedent_docstr(s, n=1):
"""Dedent all lines except first n lines
Args:
s (type): some text to dedent
n (int): number of lines to skip, (n == 0 is a normal dedent,
n == 1 is useful for whole docstrings)
"""
lines = s.splitlines(keepends=True)
if lines:
first_n_lines = "".join([l.lstrip(' \t') for l in lines[:n]])
dedented = dedent("".join(lines[n:]))
return first_n_lines + dedented
else:
return ""
def dedent_verbose(s, n=1):
new = dedent_docstr(s, n=n)
s_split = s.splitlines(keepends=True)
new_split = new.splitlines(keepends=True)
i, ind = 0, -1
for i in range(n, len(s_split)):
if s_split[i].strip():
ind = s_split[i].find(new_split[i])
break
if ind >= 0:
indent = s_split[i][:ind]
else:
indent = ""
return indent, new
def indent_docstr(s, indent, n=1, trim=True):
"""Add common indentation to all lines except first
Args:
s (str): docstring starting at indentation level 0
indent (str): text used for indentation, in practice
this will be the level of the declaration + 1
n (int): don't indent first n lines
trim (bool): trim whitespace (' \t') out of blank lines
Returns:
s with common indentation applied
"""
lines = s.splitlines(keepends=True)
for i in range(n, len(lines)):
if lines[i].strip() or not trim:
lines[i] = "{0}{1}".format(indent, lines[i])
else:
lines[i] = lines[i].strip(' \t')
return "".join(lines)
def count_leading_newlines(s):
"""count number of leading newlines
this includes newlines that are separated by other whitespace
"""
return s[:-len(s.lstrip())].count('\n')
def count_trailing_newlines(s):
"""count number of trailing newlines
this includes newlines that are separated by other whitespace
"""
return s[len(s.rstrip()):].count('\n')
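# Usage sketch (illustrative only): whitespace between newlines still
# counts, which matters for docstrings that end in "\n    \n".
def _demo_count_newlines():
    assert count_leading_newlines("\n \n x") == 2
    assert count_trailing_newlines("x\n \n") == 2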
def with_bounding_newlines(s, nleading=0, ntrailing=0, nl='\n'):
"""return s with at least # leading and # trailing newlines
this includes newlines that are separated by other whitespace
"""
return "{0}{1}{2}".format(nl * (nleading - count_leading_newlines(s)),
s,
nl * (ntrailing - count_trailing_newlines(s)))
def strip_newlines(s, nleading=0, ntrailing=0):
    """strip at most nleading and ntrailing newlines from s"""
    # check for '\r\n' before '\n' (a single-char comparison can never
    # equal the two-char '\r\n'), and use slices so empty strings are safe
    for _ in range(nleading):
        if s.lstrip(' \t')[:2] == '\r\n':
            s = s.lstrip(' \t')[2:]
        elif s.lstrip(' \t')[:1] == '\n':
            s = s.lstrip(' \t')[1:]
    for _ in range(ntrailing):
        if s.rstrip(' \t')[-2:] == '\r\n':
            s = s.rstrip(' \t')[:-2]
        elif s.rstrip(' \t')[-1:] == '\n':
            s = s.rstrip(' \t')[:-1]
    return s
class Parameter(object):
""""""
names = None
types = None
description = None
tag = None
descr_only = None
meta = None
def __init__(self, names, types, description, tag=None, descr_only=False,
annotated=False, **kwargs):
"""
Args:
names (list): list of names
types (str): string describing data types
description (str): description text
tag (int): some meaningful index? not fleshed out yet
descr_only (bool): only description is useful
**kwargs: Description
"""
assert names is not None
if description is None:
description = ""
self.names = names
self.types = types
self.description = description
self.tag = tag
self.descr_only = descr_only
self.annotated = annotated
self.meta = kwargs
class Section(object):
""""""
ALIASES = {}
PARSERS = {}
is_formatted = None
args = None
args_parser = None
args_formatter = None
heading = None
alias = None
_text = None
section_indent = ""
indent = " "
meta = None
formatter_override = None
def __init__(self, heading, text="", indent=None, **kwargs):
"""
Args:
heading (str): heading of the section (should be title case)
text (str, optional): section text
indent (str, optional): used by some formatters
"""
self.heading = heading
self.alias = self.resolve_alias(heading)
if self.alias in self.PARSERS:
parser, formatter = self.PARSERS[self.alias]
self.args_parser = parser
self.args_formatter = formatter
self.is_formatted = True
else:
self.is_formatted = False
if indent is not None:
self.indent = indent
self.text = text
self.meta = kwargs
logger.debug("create section '{}' ({}) with args : '{}'".format(self.heading,
self.alias,
self.args))
@classmethod
def from_section(cls, sec):
new_sec = cls(sec.alias)
new_sec._text = sec._text # pylint: disable=protected-access
# when changing styles, the indentation should change to better fit
# the new style
# new_sec.section_indent = sec.section_indent
# new_sec.indent = sec.indent
if hasattr(sec, "args"):
new_sec.args = sec.args
return new_sec
@classmethod
def resolve_alias(cls, heading):
""""""
titled_heading = heading.title()
try:
return cls.ALIASES[titled_heading]
except KeyError:
return heading
@property
def text(self):
""""""
if self.formatter_override is not None:
s = self.formatter_override(self) # pylint: disable=not-callable
elif self.args_formatter is not None:
s = self.args_formatter(self)
else:
s = self._text
return s
@text.setter
def text(self, val):
""""""
val = strip_newlines(val, ntrailing=1)
if self.args_parser is not None:
self.args = self.args_parser(self, val)
else:
section_indent, self._text = dedent_verbose(val, n=0)
# don't overwrite section indent if val isn't indented
if section_indent:
self.section_indent = section_indent
class NapoleonSection(Section):
""""""
ALIASES = {"Args": "Parameters",
"Arguments": "Parameters",
"Deleted Args": "Deleted Parameters",
"Deleted Arguments": "Deleted Parameters",
"Other Args": "Other Parameters",
"Other Arguments": "Other Parameters",
"Keyword Args": "Keyword Arguments",
"Return": "Returns",
"Yield": "Yields",
"No Longer Returns": "No Longer Returned",
"No Longer Yields": "No Longer Yielded",
"Warnings": "Warning"
}
def is_return_section(self):
return self.heading and self.heading.lower() in ('return', 'returns',
'yield', 'yields')
def param_parser_common(self, text):
# NOTE: there will be some tricky business if there is a
# section break done by "resuming unindented text"
param_list = []
param_dict = OrderedDict()
text = dedent_docstr(text, 0)
_r = r"^\S[^\r\n]*(?:\n[^\S\n]+\S[^\r\n]*|\n)*"
param_blocks = re.findall(_r, text, re.MULTILINE)
for i, block in enumerate(param_blocks):
param = self.finalize_param(block, len(param_list))
param_list.append(param)
if self.is_return_section():
param.names = [", ".join(param.names)]
param_dict[i] = param
else:
for name in param.names:
param_dict[name] = param
return param_dict
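# Note on the regex in param_parser_common (added annotation): a parameter
# block is one unindented line plus any indented continuation lines, so
# findall() yields one string per parameter. A minimal sketch:
def _demo_param_blocks():
    text = "x : int\n    first\ny : str\n    second\n"
    _r = r"^\S[^\r\n]*(?:\n[^\S\n]+\S[^\r\n]*|\n)*"
    assert len(re.findall(_r, text, re.MULTILINE)) == 2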
class GoogleSection(NapoleonSection):
""""""
section_indent = " "
indent = " "
@staticmethod
def finalize_param(s, tag):
"""
Args:
s (type): Description
tag (int): index of param? not fleshed out yet
"""
meta = {}
_r = r"([^,\s]+(?:\s*,\s*[^,\s]+)*\s*)(?:\((.*)\))?\s*:\s*(.*)"
m = re.match(_r, s, re.DOTALL | re.MULTILINE)
if m:
names, typ, descr = m.groups()
names = [n.strip() for n in names.split(',')]
meta['indent'], descr = dedent_verbose(descr, n=1)
descr_only = False
else:
names = ["{0}".format(tag)]
typ = ""
descr = s
descr_only = True
return Parameter(names, typ, descr, tag=tag, descr_only=descr_only, **meta)
def param_parser(self, text):
logger.info("[GoogleSection] section '{}' starts parsing".format(self.alias))
return self.param_parser_common(text)
def param_formatter(self):
""""""
logger.info("[GoogleSection] section '{}' starts formatting".format(self.alias))
s = ""
for param in self.args.values():
if param.descr_only:
s += with_bounding_newlines(param.description, ntrailing=1)
else:
if len(param.names) > 1:
logger.warn("section '{}' : Google docstrings don't allow > 1 "
"parameter per description".format(self.alias))
p = "{0}".format(", ".join(param.names))
if param.types:
types = param.types.strip()
if types:
p = "{0} ({1})".format(p, types)
if param.description:
desc = indent_docstr(param.description,
param.meta.get("indent", self.indent))
p = "{0}: {1}".format(p, desc)
s += with_bounding_newlines(p, ntrailing=1)
return s
PARSERS = {"Parameters": (param_parser,
param_formatter),
"Other Parameters": (param_parser,
param_formatter),
"Deleted Parameters": (param_parser,
param_formatter),
"Keyword Arguments": (param_parser,
param_formatter),
"Attributes": (param_parser,
param_formatter),
"Deleted Attributes": (param_parser,
param_formatter),
"Raises": (param_parser,
param_formatter),
"No Longer Raises": (param_parser,
param_formatter),
"Returns": (param_parser,
param_formatter),
"Yields": (param_parser,
param_formatter),
"No Longer Returned": (param_parser,
param_formatter),
"No Longer Yielded": (param_parser,
param_formatter),
}
class NumpySection(NapoleonSection):
""""""
indent = " "
@staticmethod
def finalize_param(s, i):
meta = {}
_r = r"\s*([^,\s]+(?:\s*,\s*[^,\s]+)*)\s*(?::\s*(.*?))?[^\S\n]*?\n(\s+.*)"
m = re.match(_r, s, re.DOTALL)
if m:
names, typ, desc = m.groups()
# FIXME hack, name for numpy parameters is always a list of names
# to support the multiple parameters per description option in
# numpy docstrings
names = [n.strip() for n in names.split(',')]
meta['indent'], descr = dedent_verbose(desc, 0)
descr_only = False
else:
names = ["{0}".format(i)]
typ = ""
descr = s
descr_only = True
return Parameter(names, typ, descr, tag=i, descr_only=descr_only, **meta)
def param_parser(self, text):
logger.info("[NumpySection] section '{}' starts parsing".format(self.alias))
return self.param_parser_common(text)
def param_formatter(self):
""""""
# NOTE: there will be some tricky business if there is a
# section break done by "resuming unindented text"
logger.info("[NumpySection] section '{}' starts formatting".format(self.alias))
s = ""
# already_seen = {}
for param in self.args.values():
if param.descr_only:
s += with_bounding_newlines(param.description, ntrailing=1)
else:
p = "{0}".format(", ".join(param.names))
if param.types:
types = param.types.strip()
if types:
p = "{0} : {1}".format(p, param.types.strip())
p = with_bounding_newlines(p, ntrailing=1)
if param.description:
p += indent_docstr(param.description,
param.meta.get("indent", self.indent),
n=0)
s += with_bounding_newlines(p, ntrailing=1)
return s
PARSERS = {"Parameters": (param_parser,
param_formatter),
"Other Parameters": (param_parser,
param_formatter),
"Deleted Parameters": (param_parser,
param_formatter),
"Keyword Arguments": (param_parser,
param_formatter),
"Attributes": (param_parser,
param_formatter),
"Deleted Attributes": (param_parser,
param_formatter),
"Raises": (param_parser,
param_formatter),
"No Longer Raises": (param_parser,
param_formatter),
"Returns": (param_parser,
param_formatter),
"Yields": (param_parser,
param_formatter),
"No Longer Returned": (param_parser,
param_formatter),
"No Longer Yielded": (param_parser,
param_formatter),
}
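# Usage sketch (illustrative only): numpy-style blocks put the type after
# " : " on the name line and the description on indented lines below it.
def _demo_numpy_finalize_param():
    p = NumpySection.finalize_param("x : int\n    a thing", 0)
    assert p.names == ["x"] and p.types == "int"
    assert p.description == "a thing"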
class Docstring(object):
"""Handle parsing / modifying / writing docstrings"""
STYLE_NAME = "none"
SECTION_STYLE = Section
TEMPLATE = OrderedDict([("Summary", None)])
PREFERRED_PARAMS_ALIAS = "Args"
sections = None
trailing_newlines = None
def __init__(self, docstr, template_order=False):
"""
Parameters:
docstr (Docstring or str): some existing docstring
template_order (bool, optional): iff True, reorder the
sections to match the order they appear in the template
"""
if isinstance(docstr, Docstring):
self.sections = docstr.sections
self.trailing_newlines = docstr.trailing_newlines
if not isinstance(docstr, type(self)):
# fixme, this is kinda hacky
make_new_sec = self.SECTION_STYLE.from_section
for sec_name, sec in docstr.sections.items():
                    # when the section should not exist
# i.e. when a section was generated, but isn't needed anymore
# e.g. when there isn't any exception raised
if sec:
docstr.sections[sec_name] = make_new_sec(sec)
else:
# deleting section that shouldn't be here
# including those generated with template_order=True
del docstr.sections[sec_name]
# ok, this way of changing indentation is a thunder hack
if "Parameters" in docstr.sections:
self.get_section("Parameters").heading = self.PREFERRED_PARAMS_ALIAS
for arg in self.get_section("Parameters").args.values():
arg.meta['indent'] = self.get_section("Parameters").indent
if "Returns" in docstr.sections:
for arg in self.get_section("Returns").args.values():
arg.meta['indent'] = self.get_section("Returns").indent
if "Yields" in docstr.sections:
for arg in self.get_section("Yields").args.values():
arg.meta['indent'] = self.get_section("Yields").indent
elif isinstance(docstr, string_types):
if template_order:
self.sections = self.TEMPLATE.copy()
else:
self.sections = OrderedDict()
self._parse(docstr)
def _parse(self, s):
"""Parse docstring into meta data
Parameters:
s (str): docstring
"""
raise NotImplementedError("_parse is an abstract method")
def format(self, top_indent):
"""Format docstring into a string
Parameters:
top_indent (str): indentation added to all but the first
lines
Returns:
str: properly formatted
"""
raise NotImplementedError("format is an abstract method")
def update_parameters(self, params):
""""""
raise NotImplementedError("update_parameters is an abstract method")
def update_return_type(self, ret_name, ret_type,
default_description="Description",
keyword="return"):
""""""
raise NotImplementedError("update_return_type is an abstract method")
def update_attributes(self, attribs, alpha_order=True):
"""
Args:
params (OrderedDict): params objects keyed by their names
"""
raise NotImplementedError("update_attributes is an abstract method")
def update_exceptions(self, attribs, alpha_order=True):
"""
Args:
params (OrderedDict): params objects keyed by their names
"""
raise NotImplementedError("update_exceptions is an abstract method")
def add_dummy_returns(self, name, typ, description):
raise NotImplementedError("add_dummy_returns is an abstract method")
def finalize_section(self, heading, text):
"""
Args:
heading (type): Description
text (type): Description
"""
section = self.SECTION_STYLE(heading, text)
self.sections[section.alias] = section
def get_section(self, section_name):
if section_name in self.sections:
return self.sections[section_name]
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
return self.sections[alias]
raise KeyError("Section '{0}' not found".format(section_name))
def pop_section(self, section_name):
if section_name in self.sections:
return self.sections.pop(section_name)
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
return self.sections.pop(alias)
raise KeyError("Section '{0}' not found".format(section_name))
def insert_section(self, section_name, section):
if section.heading != section_name:
section.heading = section_name
self.sections[section_name] = section
def section_exists(self, section_name):
"""returns True iff section exists, and was finalized"""
sec = None
if section_name in self.sections:
sec = self.sections[section_name]
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
sec = self.sections[alias]
if sec is not None:
return True
return False
class NapoleonDocstring(Docstring): # pylint: disable=abstract-method
"""Styles understood by napoleon, aka. Google/Numpy"""
STYLE_NAME = "napoleon"
TEMPLATE = OrderedDict([("Summary", None),
("Parameters", None),
("Keyword Arguments", None),
("Returns", None),
("Yields", None),
("No Longer Returned", None),
("No Longer Yielded", None),
("Other Parameters", None),
("Deleted Parameters", None),
("Attributes", None),
("Deleted Attributes", None),
("Methods", None),
("Raises", None),
("No Longer Raises", None),
("Warns", None),
("See Also", None),
("Warning", None),
("Note", None),
("Notes", None),
("References", None),
("Example", None),
("Examples", None),
])
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip()
def _parse(self, s):
"""
Args:
s (type): Description
"""
logger.info("[NapoleonDocstring] starts parsing text")
self.trailing_newlines = count_trailing_newlines(s)
s = dedent_docstr(s)
sec_starts = [(m.start(), m.end(), m.string[m.start():m.end()])
for m in re.finditer(self.SECTION_RE, s, re.MULTILINE)]
sec_starts.insert(0, (0, 0, "Summary"))
sec_starts.append((len(s), len(s), ""))
for current_sec, next_sec in zip(sec_starts[:-1], sec_starts[1:]):
sec_name = self._extract_section_name(current_sec[2])
sec_body = s[current_sec[1]:next_sec[0]]
self.finalize_section(sec_name, sec_body)
@staticmethod
def _format_section_text(heading, body):
raise NotImplementedError("This is an abstract method")
def format(self, top_indent):
"""
Args:
top_indent (type): Description
"""
logger.info("[NapoleonDocstring] starts formatting")
s = ""
if self.section_exists("Summary"):
sec_text = self.get_section("Summary").text
if sec_text.strip():
s += with_bounding_newlines(sec_text, nleading=0, ntrailing=1)
for _, section in islice(self.sections.items(), 1, None):
if section is None:
continue
sec_body = indent_docstr(section.text, section.section_indent, n=0)
sec_text = self._format_section_text(section.heading, sec_body)
s += with_bounding_newlines(sec_text, nleading=1, ntrailing=1)
if self.trailing_newlines:
s = with_bounding_newlines(s, ntrailing=self.trailing_newlines)
s = indent_docstr(s, top_indent)
return s
def _update_section(self, params, sec_name, sec_alias=None,
del_prefix="Deleted ", alpha_order=False,
other_sections=()):
"""Update section to add / remove params
As a failsafe, params that are removed are placed in a
"Deleted ..." section
Args:
params (OrderedDict): dict of Parameter objects
sec_name (str): generic section name
            sec_alias (str): section name that appears in the docstring
del_prefix (str): prefix for section that holds params that
no longer exist.
alpha_order (bool): whether or not to alphabetically sort
the params
"""
if not sec_alias:
sec_alias = sec_name
if not self.section_exists(sec_name) and len(params) == 0:
return None
elif not self.section_exists(sec_name):
self.finalize_section(sec_alias, "")
# put together which other sections exist so we can use them to
# exclude params that exist in them
_other = []
for _secname in other_sections:
if self.section_exists(_secname):
_other.append(self.get_section(_secname))
other_sections = _other
if alpha_order:
sorted_params = OrderedDict()
for k in sorted(list(params.keys()), key=str.lower):
sorted_params[k] = params[k]
params = sorted_params
current_dict = self.get_section(sec_name).args
# go through params in the order of the function declaration
# and cherry-pick from current_dict if there's already a description
# for that parameter
tags_seen = dict()
new = OrderedDict()
for name, param in params.items():
if name in current_dict:
def_param = param
param = current_dict.pop(name)
if param.tag in tags_seen:
param = None
else:
tags_seen[param.tag] = True
# update the type if annotated
if def_param.annotated:
param.types = def_param.types
else:
# if param is in one of the 'other sections', then don't
# worry about it
for sec in other_sections:
if name in sec.args:
                        # update the type if the param is annotated
if param.annotated:
sec.args[name].types = param.types
# now ignore it
param = None
if param:
new[name] = param
        # add description-only parameters back in; iterate over a copy
        # because entries are popped from current_dict inside the loop
        for key, param in list(current_dict.items()):
            if param.descr_only:
                # param.description = '\n' + param.description
                new[key] = current_dict.pop(key)
# not sure when this guy gets created
if '' in current_dict:
del current_dict['']
        # go through params that are no longer in the arguments list and
# move them from the Parameters section of the docstring to the
# deleted parameters section
if len(current_dict):
del_sec_name = del_prefix + sec_name
del_sec_alias = del_prefix + sec_alias
logger.warn("killing parameters named: {}".format(current_dict.keys()))
            # TODO: put a switch here for other behavior?
if not self.section_exists(self.SECTION_STYLE.resolve_alias(del_sec_name)):
self.finalize_section(del_sec_name, "")
deled_params = self.get_section(del_sec_name)
deleted_tags = dict()
for key, val in current_dict.items():
if key in deled_params.args:
logger.warn("Stronger Warning: Killing old deleted param: "
"'{0}'".format(key))
val.names.remove(key)
if val.tag in deleted_tags:
deleted_tags[val.tag].names.append(key)
else:
new_val = Parameter([key], val.types, val.description)
deleted_tags[val.tag] = new_val
deled_params.args[key] = new_val
if len(new) == 0:
self.sections[sec_name] = None
else:
self.sections[sec_name].args = new
def update_parameters(self, params):
"""
Args:
params (OrderedDict): params objects keyed by their names
"""
logger.info("[NapoleonDocstring] update parameters")
        other_sections = ['Other Parameters', 'Keyword Arguments']
self._update_section(params, "Parameters", self.PREFERRED_PARAMS_ALIAS,
other_sections=other_sections)
def update_return_type(self, ret_name, ret_type,
default_description="Description",
keyword="return", del_prefix="No Longer "):
""""""
logger.info("[NapoleonDocstring] update return type")
if keyword == "yield":
sec_name = "Yields"
elif keyword == "return":
sec_name = "Returns"
        else:
            logger.debug("Unknown return keyword: '{}'".format(keyword))
            for std_ret_name in ("Yields", "Returns"):
                if self.section_exists(std_ret_name):
                    del_sec_name = del_prefix + std_ret_name
                    del_sec_alias = self.SECTION_STYLE.resolve_alias(del_sec_name)
                    if not self.section_exists(del_sec_alias):
                        self.finalize_section(del_sec_alias, "")
                    del_sec = self.get_section(del_sec_alias)
                    sec = self.pop_section(std_ret_name)
                    del_sec.args = sec.args
                    return
            # unknown keyword and no existing return-ish section; bail out
            # here since sec_name was never set
            return
if not self.section_exists(sec_name):
            # see if a section exists from another keyword, i.e., maybe
# this function used to return, but now it yields
for std_ret_name in ("Yields", "Returns"):
if self.section_exists(std_ret_name):
                    # necessary to completely recreate the section
# in order to use the right parser and formatter
logger.debug("old return section exists : '{}'".format(std_ret_name))
old_sec = self.pop_section(std_ret_name)
self.finalize_section(sec_name, "")
new_sec = self.get_section(sec_name)
new_sec.args = old_sec.args
self.insert_section(sec_name, new_sec)
break
if self.section_exists(sec_name):
sec = self.get_section(sec_name)
if sec.args and ret_type:
p0 = next(iter(sec.args.values()))
if p0.descr_only:
p0.description = ret_type
elif p0.types:
p0.types = ret_type
elif p0.names:
p0.names = [ret_type]
elif ret_name or ret_type:
description = default_description
sec.args = OrderedDict()
if ret_name:
sec.args[ret_name] = Parameter([ret_name], ret_type, description)
else:
sec.args[ret_type] = Parameter([ret_type], "", description)
else:
# and i ask myself, how did i get here?
pass
else:
self.finalize_section(sec_name, "")
sec = self.get_section(sec_name)
ret_type = ret_type if ret_type != "" else "${NUMBER:TYPE}"
sec.args = OrderedDict()
sec.args[ret_type] = Parameter([ret_type], "", default_description)
def update_attributes(self, attribs, alpha_order=True):
"""
Args:
params (OrderedDict): params objects keyed by their names
"""
logger.info("[NapoleonDocstring] update attributes")
self._update_section(attribs, "Attributes", alpha_order=alpha_order)
def update_exceptions(self, attribs, alpha_order=True):
"""
Args:
params (OrderedDict): params objects keyed by their names
"""
logger.info("[NapoleonDocstring] update exceptions")
self._update_section(attribs, "Raises", del_prefix="No Longer ",
alpha_order=alpha_order)
def add_dummy_returns(self, name, typ, description):
# No longer used??
if not self.section_exists("Returns"):
sec = self.SECTION_STYLE("Returns")
if name:
sec.args = {name: Parameter([name], typ, description)}
else:
sec.args = {typ: Parameter([typ], "", description)}
self.sections["Returns"] = sec
class GoogleDocstring(NapoleonDocstring):
""""""
STYLE_NAME = "google"
SECTION_STYLE = GoogleSection
SECTION_RE = r"^[A-Za-z0-9][A-Za-z0-9 \t]*:\s*$\r?\n?"
PREFERRED_PARAMS_ALIAS = "Args"
@classmethod
def detect_style(cls, docstr):
""""""
m = re.search(cls.SECTION_RE, docstr, re.MULTILINE)
return m is not None
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip().rstrip(':').rstrip()
@staticmethod
def _format_section_text(heading, body):
return "{0}:\n{1}".format(heading, body)
class NumpyDocstring(NapoleonDocstring):
""""""
STYLE_NAME = "numpy"
SECTION_STYLE = NumpySection
SECTION_RE = r"^([A-Za-z0-9][A-Za-z0-9 \t]*)\s*\n-+\s*?$\r?\n?"
PREFERRED_PARAMS_ALIAS = "Parameters"
@classmethod
def detect_style(cls, docstr):
""""""
m = re.search(cls.SECTION_RE, docstr, re.MULTILINE)
return m is not None
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip().rstrip('-').rstrip()
@staticmethod
def _format_section_text(heading, body):
return "{0}\n{1}\n{2}".format(heading, "-" * len(heading), body)
STYLE_LOOKUP = OrderedDict([('numpy', NumpyDocstring),
('google', GoogleDocstring)])
##
## EOF
##
|
__init__
|
Args:
names (list): list of names
types (str): string describing data types
description (str): description text
tag (int): some meaningful index? not fleshed out yet
descr_only (bool): only description is useful
**kwargs: Description
|
# -*- coding: utf-8 -*-
"""Docstring Parsers/Formatters"""
# TODO: break this module up into smaller pieces
import sys
import re
from textwrap import dedent
from collections import OrderedDict
from itertools import islice
from .autodocstring_logging import logger
PY3k = sys.version_info[0] == 3
if PY3k:
string_types = str,
else:
string_types = basestring, # pylint: disable=undefined-variable
def make_docstring_obj(docstr, default="google", template_order=False):
"""Detect docstring style and create a Docstring object
Parameters:
docstr (str): source docstring
default (str, class): 'google', 'numpy' or subclass
of Docstring
template_order (bool, optional): iff True, reorder the
sections to match the order they appear in the template
Returns:
subclass of Docstring
"""
typ = detect_style(docstr)
# default may be a style name (str) or a Docstring subclass; normalize it
# for logging and lookup (str has no __name__)
default_name = default if isinstance(default, string_types) else default.__name__
logger.info("[make_docstring_obj] from {} to {}"
"".format(typ.__name__ if typ is not None else None, default_name))
if typ is None:
if isinstance(default, string_types):
typ = STYLE_LOOKUP[default.lower()]
else:
typ = default
return typ(docstr, template_order=template_order)
def detect_style(docstr):
"""Detect docstr style from existing docstring
Parameters:
docstr (str): docstring whose style we want to know
Returns:
class: one of [GoogleDocstring, NumpyDocstring, None]; None
means no match
"""
docstr = dedent_docstr(docstr)
for c in STYLE_LOOKUP.values():
if c.detect_style(docstr):
return c
return None
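# Quick usage sketch (hypothetical docstrings, not from this project):
#
#     >>> detect_style("Summary\n\nArgs:\n    x (int): a value\n").STYLE_NAME
#     'google'
#     >>> detect_style("just a summary line\n") is None
#     True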
def dedent_docstr(s, n=1):
"""Dedent all lines except first n lines
Args:
s (type): some text to dedent
n (int): number of lines to skip, (n == 0 is a normal dedent,
n == 1 is useful for whole docstrings)
"""
lines = s.splitlines(keepends=True)
if lines:
first_n_lines = "".join([l.lstrip(' \t') for l in lines[:n]])
dedented = dedent("".join(lines[n:]))
return first_n_lines + dedented
else:
return ""
def dedent_verbose(s, n=1):
new = dedent_docstr(s, n=n)
s_split = s.splitlines(keepends=True)
new_split = new.splitlines(keepends=True)
i, ind = 0, -1
for i in range(n, len(s_split)):
if s_split[i].strip():
ind = s_split[i].find(new_split[i])
break
if ind >= 0:
indent = s_split[i][:ind]
else:
indent = ""
return indent, new
def indent_docstr(s, indent, n=1, trim=True):
"""Add common indentation to all lines except first
Args:
s (str): docstring starting at indentation level 0
indent (str): text used for indentation, in practice
this will be the level of the declaration + 1
n (int): don't indent first n lines
trim (bool): trim whitespace (' \t') out of blank lines
Returns:
s with common indentation applied
"""
lines = s.splitlines(keepends=True)
for i in range(n, len(lines)):
if lines[i].strip() or not trim:
lines[i] = "{0}{1}".format(indent, lines[i])
else:
lines[i] = lines[i].strip(' \t')
return "".join(lines)
def count_leading_newlines(s):
"""count number of leading newlines
this includes newlines that are separated by other whitespace
"""
return s[:-len(s.lstrip())].count('\n')
def count_trailing_newlines(s):
"""count number of trailing newlines
this includes newlines that are separated by other whitespace
"""
return s[len(s.rstrip()):].count('\n')
def with_bounding_newlines(s, nleading=0, ntrailing=0, nl='\n'):
"""return s with at least # leading and # trailing newlines
this includes newlines that are separated by other whitespace
"""
return "{0}{1}{2}".format(nl * (nleading - count_leading_newlines(s)),
s,
nl * (ntrailing - count_trailing_newlines(s)))
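# Sketch (made-up input): existing bounding newlines are counted first, so
# the function tops up to the requested count instead of blindly appending:
#
#     >>> with_bounding_newlines("text\n", nleading=1, ntrailing=2)
#     '\ntext\n\n'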
def strip_newlines(s, nleading=0, ntrailing=0):
"""strip at most nleading and ntrailing newlines from s"""
for _ in range(nleading):
# check the two-char '\r\n' before the one-char '\n' (startswith is
# also safe on an empty string)
if s.lstrip(' \t').startswith('\r\n'):
s = s.lstrip(' \t')[2:]
elif s.lstrip(' \t').startswith('\n'):
s = s.lstrip(' \t')[1:]
for _ in range(ntrailing):
if s.rstrip(' \t')[-2:] == '\r\n':
s = s.rstrip(' \t')[:-2]
elif s.rstrip(' \t')[-1:] == '\n':
s = s.rstrip(' \t')[:-1]
return s
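# Sketch (made-up input): at most the requested number of newlines comes
# off each end; interior newlines are untouched:
#
#     >>> strip_newlines("\n\nbody\n\n", nleading=1, ntrailing=1)
#     '\nbody\n'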
class Parameter(object):
""""""
names = None
types = None
description = None
tag = None
descr_only = None
meta = None
# MASKED: __init__ function (lines 166-186)
class Section(object):
""""""
ALIASES = {}
PARSERS = {}
is_formatted = None
args = None
args_parser = None
args_formatter = None
heading = None
alias = None
_text = None
section_indent = ""
indent = " "
meta = None
formatter_override = None
def __init__(self, heading, text="", indent=None, **kwargs):
"""
Args:
heading (str): heading of the section (should be title case)
text (str, optional): section text
indent (str, optional): used by some formatters
"""
self.heading = heading
self.alias = self.resolve_alias(heading)
if self.alias in self.PARSERS:
parser, formatter = self.PARSERS[self.alias]
self.args_parser = parser
self.args_formatter = formatter
self.is_formatted = True
else:
self.is_formatted = False
if indent is not None:
self.indent = indent
self.text = text
self.meta = kwargs
logger.debug("create section '{}' ({}) with args : '{}'".format(self.heading,
self.alias,
self.args))
@classmethod
def from_section(cls, sec):
new_sec = cls(sec.alias)
new_sec._text = sec._text # pylint: disable=protected-access
# when changing styles, the indentation should change to better fit
# the new style
# new_sec.section_indent = sec.section_indent
# new_sec.indent = sec.indent
if hasattr(sec, "args"):
new_sec.args = sec.args
return new_sec
@classmethod
def resolve_alias(cls, heading):
""""""
titled_heading = heading.title()
try:
return cls.ALIASES[titled_heading]
except KeyError:
return heading
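# Hedged sketch (hypothetical call): subclasses fill in ALIASES, so on
# NapoleonSection (defined below) a Google-style heading collapses to the
# canonical one, while unknown headings pass through untouched:
#
#     >>> NapoleonSection.resolve_alias("Args")
#     'Parameters'
#     >>> NapoleonSection.resolve_alias("Examples")
#     'Examples'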
@property
def text(self):
""""""
if self.formatter_override is not None:
s = self.formatter_override(self) # pylint: disable=not-callable
elif self.args_formatter is not None:
s = self.args_formatter(self)
else:
s = self._text
return s
@text.setter
def text(self, val):
""""""
val = strip_newlines(val, ntrailing=1)
if self.args_parser is not None:
self.args = self.args_parser(self, val)
else:
section_indent, self._text = dedent_verbose(val, n=0)
# don't overwrite section indent if val isn't indented
if section_indent:
self.section_indent = section_indent
class NapoleonSection(Section):
""""""
ALIASES = {"Args": "Parameters",
"Arguments": "Parameters",
"Deleted Args": "Deleted Parameters",
"Deleted Arguments": "Deleted Parameters",
"Other Args": "Other Parameters",
"Other Arguments": "Other Parameters",
"Keyword Args": "Keyword Arguments",
"Return": "Returns",
"Yield": "Yields",
"No Longer Returns": "No Longer Returned",
"No Longer Yields": "No Longer Yielded",
"Warnings": "Warning"
}
def is_return_section(self):
return self.heading and self.heading.lower() in ('return', 'returns',
'yield', 'yields')
def param_parser_common(self, text):
# NOTE: there will be some tricky business if there is a
# section break done by "resuming unindented text"
param_list = []
param_dict = OrderedDict()
text = dedent_docstr(text, 0)
_r = r"^\S[^\r\n]*(?:\n[^\S\n]+\S[^\r\n]*|\n)*"
param_blocks = re.findall(_r, text, re.MULTILINE)
for i, block in enumerate(param_blocks):
param = self.finalize_param(block, len(param_list))
param_list.append(param)
if self.is_return_section():
param.names = [", ".join(param.names)]
param_dict[i] = param
else:
for name in param.names:
param_dict[name] = param
return param_dict
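# The block regex above splits a section body into one chunk per entry: a
# line starting at column 0 plus its indented continuations and any blank
# lines. A hedged sketch on made-up text (trailing newlines included):
#
#     >>> _r = r"^\S[^\r\n]*(?:\n[^\S\n]+\S[^\r\n]*|\n)*"
#     >>> re.findall(_r, "x (int): foo\n    more\ny (str): bar\n",
#     ...            re.MULTILINE)
#     ['x (int): foo\n    more\n', 'y (str): bar\n']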
class GoogleSection(NapoleonSection):
""""""
section_indent = " "
indent = " "
@staticmethod
def finalize_param(s, tag):
"""
Args:
s (type): Description
tag (int): index of param? not fleshed out yet
"""
meta = {}
_r = r"([^,\s]+(?:\s*,\s*[^,\s]+)*\s*)(?:\((.*)\))?\s*:\s*(.*)"
m = re.match(_r, s, re.DOTALL | re.MULTILINE)
if m:
names, typ, descr = m.groups()
names = [n.strip() for n in names.split(',')]
meta['indent'], descr = dedent_verbose(descr, n=1)
descr_only = False
else:
names = ["{0}".format(tag)]
typ = ""
descr = s
descr_only = True
return Parameter(names, typ, descr, tag=tag, descr_only=descr_only, **meta)
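# Hedged sketch of what the regex pulls out of a Google-style entry
# (made-up input):
#
#     >>> p = GoogleSection.finalize_param("x (int): a counter", 0)
#     >>> p.names, p.types, p.description
#     (['x'], 'int', 'a counter')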
def param_parser(self, text):
logger.info("[GoogleSection] section '{}' starts parsing".format(self.alias))
return self.param_parser_common(text)
def param_formatter(self):
""""""
logger.info("[GoogleSection] section '{}' starts formatting".format(self.alias))
s = ""
for param in self.args.values():
if param.descr_only:
s += with_bounding_newlines(param.description, ntrailing=1)
else:
if len(param.names) > 1:
logger.warn("section '{}' : Google docstrings don't allow > 1 "
"parameter per description".format(self.alias))
p = "{0}".format(", ".join(param.names))
if param.types:
types = param.types.strip()
if types:
p = "{0} ({1})".format(p, types)
if param.description:
desc = indent_docstr(param.description,
param.meta.get("indent", self.indent))
p = "{0}: {1}".format(p, desc)
s += with_bounding_newlines(p, ntrailing=1)
return s
PARSERS = {"Parameters": (param_parser,
param_formatter),
"Other Parameters": (param_parser,
param_formatter),
"Deleted Parameters": (param_parser,
param_formatter),
"Keyword Arguments": (param_parser,
param_formatter),
"Attributes": (param_parser,
param_formatter),
"Deleted Attributes": (param_parser,
param_formatter),
"Raises": (param_parser,
param_formatter),
"No Longer Raises": (param_parser,
param_formatter),
"Returns": (param_parser,
param_formatter),
"Yields": (param_parser,
param_formatter),
"No Longer Returned": (param_parser,
param_formatter),
"No Longer Yielded": (param_parser,
param_formatter),
}
class NumpySection(NapoleonSection):
""""""
indent = " "
@staticmethod
def finalize_param(s, i):
meta = {}
_r = r"\s*([^,\s]+(?:\s*,\s*[^,\s]+)*)\s*(?::\s*(.*?))?[^\S\n]*?\n(\s+.*)"
m = re.match(_r, s, re.DOTALL)
if m:
names, typ, desc = m.groups()
# FIXME hack, name for numpy parameters is always a list of names
# to support the multiple parameters per description option in
# numpy docstrings
names = [n.strip() for n in names.split(',')]
meta['indent'], descr = dedent_verbose(desc, 0)
descr_only = False
else:
names = ["{0}".format(i)]
typ = ""
descr = s
descr_only = True
return Parameter(names, typ, descr, tag=i, descr_only=descr_only, **meta)
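# Hedged sketch for the numpy flavor (made-up input); the description is
# taken from the indented continuation line:
#
#     >>> p = NumpySection.finalize_param("x : int\n    a counter\n", 0)
#     >>> p.names, p.types, p.description
#     (['x'], 'int', 'a counter\n')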
def param_parser(self, text):
logger.info("[NumpySection] section '{}' starts parsing".format(self.alias))
return self.param_parser_common(text)
def param_formatter(self):
""""""
# NOTE: there will be some tricky business if there is a
# section break done by "resuming unindented text"
logger.info("[NumpySection] section '{}' starts formatting".format(self.alias))
s = ""
# already_seen = {}
for param in self.args.values():
if param.descr_only:
s += with_bounding_newlines(param.description, ntrailing=1)
else:
p = "{0}".format(", ".join(param.names))
if param.types:
types = param.types.strip()
if types:
p = "{0} : {1}".format(p, param.types.strip())
p = with_bounding_newlines(p, ntrailing=1)
if param.description:
p += indent_docstr(param.description,
param.meta.get("indent", self.indent),
n=0)
s += with_bounding_newlines(p, ntrailing=1)
return s
PARSERS = {"Parameters": (param_parser,
param_formatter),
"Other Parameters": (param_parser,
param_formatter),
"Deleted Parameters": (param_parser,
param_formatter),
"Keyword Arguments": (param_parser,
param_formatter),
"Attributes": (param_parser,
param_formatter),
"Deleted Attributes": (param_parser,
param_formatter),
"Raises": (param_parser,
param_formatter),
"No Longer Raises": (param_parser,
param_formatter),
"Returns": (param_parser,
param_formatter),
"Yields": (param_parser,
param_formatter),
"No Longer Returned": (param_parser,
param_formatter),
"No Longer Yielded": (param_parser,
param_formatter),
}
class Docstring(object):
"""Handle parsing / modifying / writing docstrings"""
STYLE_NAME = "none"
SECTION_STYLE = Section
TEMPLATE = OrderedDict([("Summary", None)])
PREFERRED_PARAMS_ALIAS = "Args"
sections = None
trailing_newlines = None
def __init__(self, docstr, template_order=False):
"""
Parameters:
docstr (Docstring or str): some existing docstring
template_order (bool, optional): iff True, reorder the
sections to match the order they appear in the template
"""
if isinstance(docstr, Docstring):
self.sections = docstr.sections
self.trailing_newlines = docstr.trailing_newlines
if not isinstance(docstr, type(self)):
# fixme, this is kinda hacky
make_new_sec = self.SECTION_STYLE.from_section
for sec_name, sec in list(docstr.sections.items()):  # list(): entries may be deleted
# when the section should not exist
# i.e. when a section was generated, but isn't needed anymore
# e.g. when there isn't any exception raised
if sec:
docstr.sections[sec_name] = make_new_sec(sec)
else:
# deleting section that shouldn't be here
# including those generated with template_order=True
del docstr.sections[sec_name]
# ok, this way of changing indentation is a thunder hack
if "Parameters" in docstr.sections:
self.get_section("Parameters").heading = self.PREFERRED_PARAMS_ALIAS
for arg in self.get_section("Parameters").args.values():
arg.meta['indent'] = self.get_section("Parameters").indent
if "Returns" in docstr.sections:
for arg in self.get_section("Returns").args.values():
arg.meta['indent'] = self.get_section("Returns").indent
if "Yields" in docstr.sections:
for arg in self.get_section("Yields").args.values():
arg.meta['indent'] = self.get_section("Yields").indent
elif isinstance(docstr, string_types):
if template_order:
self.sections = self.TEMPLATE.copy()
else:
self.sections = OrderedDict()
self._parse(docstr)
def _parse(self, s):
"""Parse docstring into meta data
Parameters:
s (str): docstring
"""
raise NotImplementedError("_parse is an abstract method")
def format(self, top_indent):
"""Format docstring into a string
Parameters:
top_indent (str): indentation added to all but the first
lines
Returns:
str: properly formatted
"""
raise NotImplementedError("format is an abstract method")
def update_parameters(self, params):
""""""
raise NotImplementedError("update_parameters is an abstract method")
def update_return_type(self, ret_name, ret_type,
default_description="Description",
keyword="return"):
""""""
raise NotImplementedError("update_return_type is an abstract method")
def update_attributes(self, attribs, alpha_order=True):
"""
Args:
attribs (OrderedDict): Parameter objects keyed by their names
"""
raise NotImplementedError("update_attributes is an abstract method")
def update_exceptions(self, attribs, alpha_order=True):
"""
Args:
attribs (OrderedDict): Parameter objects keyed by their names
"""
raise NotImplementedError("update_exceptions is an abstract method")
def add_dummy_returns(self, name, typ, description):
raise NotImplementedError("add_dummy_returns is an abstract method")
def finalize_section(self, heading, text):
"""
Args:
heading (type): Description
text (type): Description
"""
section = self.SECTION_STYLE(heading, text)
self.sections[section.alias] = section
def get_section(self, section_name):
if section_name in self.sections:
return self.sections[section_name]
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
return self.sections[alias]
raise KeyError("Section '{0}' not found".format(section_name))
def pop_section(self, section_name):
if section_name in self.sections:
return self.sections.pop(section_name)
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
return self.sections.pop(alias)
raise KeyError("Section '{0}' not found".format(section_name))
def insert_section(self, section_name, section):
if section.heading != section_name:
section.heading = section_name
self.sections[section_name] = section
def section_exists(self, section_name):
"""returns True iff section exists, and was finalized"""
sec = None
if section_name in self.sections:
sec = self.sections[section_name]
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
sec = self.sections[alias]
if sec is not None:
return True
return False
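# A hedged end-to-end sketch (made-up docstring): detect the style, parse
# into sections, and re-emit at a given indentation level:
#
#     >>> doc = make_docstring_obj("Summary\n\nArgs:\n    x (int): a value\n")
#     >>> type(doc).__name__
#     'GoogleDocstring'
#     >>> "x (int): a value" in doc.format("    ")
#     True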
class NapoleonDocstring(Docstring): # pylint: disable=abstract-method
"""Styles understood by napoleon, aka. Google/Numpy"""
STYLE_NAME = "napoleon"
TEMPLATE = OrderedDict([("Summary", None),
("Parameters", None),
("Keyword Arguments", None),
("Returns", None),
("Yields", None),
("No Longer Returned", None),
("No Longer Yielded", None),
("Other Parameters", None),
("Deleted Parameters", None),
("Attributes", None),
("Deleted Attributes", None),
("Methods", None),
("Raises", None),
("No Longer Raises", None),
("Warns", None),
("See Also", None),
("Warning", None),
("Note", None),
("Notes", None),
("References", None),
("Example", None),
("Examples", None),
])
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip()
def _parse(self, s):
"""
Args:
s (type): Description
"""
logger.info("[NapoleonDocstring] starts parsing text")
self.trailing_newlines = count_trailing_newlines(s)
s = dedent_docstr(s)
sec_starts = [(m.start(), m.end(), m.string[m.start():m.end()])
for m in re.finditer(self.SECTION_RE, s, re.MULTILINE)]
sec_starts.insert(0, (0, 0, "Summary"))
sec_starts.append((len(s), len(s), ""))
for current_sec, next_sec in zip(sec_starts[:-1], sec_starts[1:]):
sec_name = self._extract_section_name(current_sec[2])
sec_body = s[current_sec[1]:next_sec[0]]
self.finalize_section(sec_name, sec_body)
@staticmethod
def _format_section_text(heading, body):
raise NotImplementedError("This is an abstract method")
def format(self, top_indent):
"""
Args:
top_indent (type): Description
"""
logger.info("[NapoleonDocstring] starts formatting")
s = ""
if self.section_exists("Summary"):
sec_text = self.get_section("Summary").text
if sec_text.strip():
s += with_bounding_newlines(sec_text, nleading=0, ntrailing=1)
for _, section in islice(self.sections.items(), 1, None):
if section is None:
continue
sec_body = indent_docstr(section.text, section.section_indent, n=0)
sec_text = self._format_section_text(section.heading, sec_body)
s += with_bounding_newlines(sec_text, nleading=1, ntrailing=1)
if self.trailing_newlines:
s = with_bounding_newlines(s, ntrailing=self.trailing_newlines)
s = indent_docstr(s, top_indent)
return s
def _update_section(self, params, sec_name, sec_alias=None,
del_prefix="Deleted ", alpha_order=False,
other_sections=()):
"""Update section to add / remove params
As a failsafe, params that are removed are placed in a
"Deleted ..." section
Args:
params (OrderedDict): dict of Parameter objects
sec_name (str): generic section name
sec_alias (str): section name that appears in the docstring
del_prefix (str): prefix for section that holds params that
no longer exist.
alpha_order (bool): whether or not to alphabetically sort
the params
"""
if not sec_alias:
sec_alias = sec_name
if not self.section_exists(sec_name) and len(params) == 0:
return None
elif not self.section_exists(sec_name):
self.finalize_section(sec_alias, "")
# put together which other sections exist so we can use them to
# exclude params that exist in them
_other = []
for _secname in other_sections:
if self.section_exists(_secname):
_other.append(self.get_section(_secname))
other_sections = _other
if alpha_order:
sorted_params = OrderedDict()
for k in sorted(list(params.keys()), key=str.lower):
sorted_params[k] = params[k]
params = sorted_params
current_dict = self.get_section(sec_name).args
# go through params in the order of the function declaration
# and cherry-pick from current_dict if there's already a description
# for that parameter
tags_seen = dict()
new = OrderedDict()
for name, param in params.items():
if name in current_dict:
def_param = param
param = current_dict.pop(name)
if param.tag in tags_seen:
param = None
else:
tags_seen[param.tag] = True
# update the type if annotated
if def_param.annotated:
param.types = def_param.types
else:
# if param is in one of the 'other sections', then don't
# worry about it
for sec in other_sections:
if name in sec.args:
# update the type if annotated
if param.annotated:
sec.args[name].types = param.types
# now ignore it
param = None
if param:
new[name] = param
# add description only parameters back in
for key, param in list(current_dict.items()):  # list(): we pop() while iterating
if param.descr_only:
# param.description = '\n' + param.description
new[key] = current_dict.pop(key)
# not sure when this guy gets created
if '' in current_dict:
del current_dict['']
# go through params that are no longer in the arguments list and
# move them from the Parameters section of the docstring to the
# deleted parameters section
if len(current_dict):
del_sec_name = del_prefix + sec_name
del_sec_alias = del_prefix + sec_alias
logger.warn("killing parameters named: {}".format(current_dict.keys()))
# TODO: put a switch here for other behavior?
if not self.section_exists(self.SECTION_STYLE.resolve_alias(del_sec_name)):
self.finalize_section(del_sec_name, "")
deled_params = self.get_section(del_sec_name)
deleted_tags = dict()
for key, val in current_dict.items():
if key in deled_params.args:
logger.warn("Stronger Warning: Killing old deleted param: "
"'{0}'".format(key))
val.names.remove(key)
if val.tag in deleted_tags:
deleted_tags[val.tag].names.append(key)
else:
new_val = Parameter([key], val.types, val.description)
deleted_tags[val.tag] = new_val
deled_params.args[key] = new_val
if len(new) == 0:
self.sections[sec_name] = None
else:
self.sections[sec_name].args = new
def update_parameters(self, params):
"""
Args:
params (OrderedDict): Parameter objects keyed by their names
"""
logger.info("[NapoleonDocstring] update parameters")
other_sections = ['Other Parameters', 'Keyword Arguments']
self._update_section(params, "Parameters", self.PREFERRED_PARAMS_ALIAS,
other_sections=other_sections)
def update_return_type(self, ret_name, ret_type,
default_description="Description",
keyword="return", del_prefix="No Longer "):
""""""
logger.info("[NapoleonDocstring] update return type")
if keyword == "yield":
sec_name = "Yields"
elif keyword == "return":
sec_name = "Returns"
else:
logger.debug("Unknown return keyword: '{}'".format(keyword))
for std_ret_name in ("Yields", "Returns"):
if self.section_exists(std_ret_name):
del_sec_name = del_prefix + std_ret_name
del_sec_alias = self.SECTION_STYLE.resolve_alias(del_sec_name)
if not self.section_exists(del_sec_alias):
self.finalize_section(del_sec_alias, "")
del_sec = self.get_section(del_sec_alias)
sec = self.pop_section(std_ret_name)
del_sec.args = sec.args
return
if not self.section_exists(sec_name):
# see if a section exists from another keyword, ie, maybe
# this function used to return, but now it yields
for std_ret_name in ("Yields", "Returns"):
if self.section_exists(std_ret_name):
# necessary to recreate the section completely
# in order to use the right parser and formatter
logger.debug("old return section exists : '{}'".format(std_ret_name))
old_sec = self.pop_section(std_ret_name)
self.finalize_section(sec_name, "")
new_sec = self.get_section(sec_name)
new_sec.args = old_sec.args
self.insert_section(sec_name, new_sec)
break
if self.section_exists(sec_name):
sec = self.get_section(sec_name)
if sec.args and ret_type:
p0 = next(iter(sec.args.values()))
if p0.descr_only:
p0.description = ret_type
elif p0.types:
p0.types = ret_type
elif p0.names:
p0.names = [ret_type]
elif ret_name or ret_type:
description = default_description
sec.args = OrderedDict()
if ret_name:
sec.args[ret_name] = Parameter([ret_name], ret_type, description)
else:
sec.args[ret_type] = Parameter([ret_type], "", description)
else:
# and i ask myself, how did i get here?
pass
else:
self.finalize_section(sec_name, "")
sec = self.get_section(sec_name)
ret_type = ret_type if ret_type != "" else "${NUMBER:TYPE}"
sec.args = OrderedDict()
sec.args[ret_type] = Parameter([ret_type], "", default_description)
def update_attributes(self, attribs, alpha_order=True):
"""
Args:
attribs (OrderedDict): Parameter objects keyed by their names
"""
logger.info("[NapoleonDocstring] update attributes")
self._update_section(attribs, "Attributes", alpha_order=alpha_order)
def update_exceptions(self, attribs, alpha_order=True):
"""
Args:
attribs (OrderedDict): Parameter objects keyed by their names
"""
logger.info("[NapoleonDocstring] update exceptions")
self._update_section(attribs, "Raises", del_prefix="No Longer ",
alpha_order=alpha_order)
def add_dummy_returns(self, name, typ, description):
# No longer used??
if not self.section_exists("Returns"):
sec = self.SECTION_STYLE("Returns")
if name:
sec.args = {name: Parameter([name], typ, description)}
else:
sec.args = {typ: Parameter([typ], "", description)}
self.sections["Returns"] = sec
class GoogleDocstring(NapoleonDocstring):
""""""
STYLE_NAME = "google"
SECTION_STYLE = GoogleSection
SECTION_RE = r"^[A-Za-z0-9][A-Za-z0-9 \t]*:\s*$\r?\n?"
PREFERRED_PARAMS_ALIAS = "Args"
@classmethod
def detect_style(cls, docstr):
""""""
m = re.search(cls.SECTION_RE, docstr, re.MULTILINE)
return m is not None
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip().rstrip(':').rstrip()
@staticmethod
def _format_section_text(heading, body):
return "{0}:\n{1}".format(heading, body)
class NumpyDocstring(NapoleonDocstring):
""""""
STYLE_NAME = "numpy"
SECTION_STYLE = NumpySection
SECTION_RE = r"^([A-Za-z0-9][A-Za-z0-9 \t]*)\s*\n-+\s*?$\r?\n?"
PREFERRED_PARAMS_ALIAS = "Parameters"
@classmethod
def detect_style(cls, docstr):
""""""
m = re.search(cls.SECTION_RE, docstr, re.MULTILINE)
return m is not None
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip().rstrip('-').rstrip()
@staticmethod
def _format_section_text(heading, body):
return "{0}\n{1}\n{2}".format(heading, "-" * len(heading), body)
STYLE_LOOKUP = OrderedDict([('numpy', NumpyDocstring),
('google', GoogleDocstring)])
##
## EOF
##
|
def __init__(self, names, types, description, tag=None, descr_only=False,
annotated=False, **kwargs):
"""
Args:
names (list): list of names
types (str): string describing data types
description (str): description text
tag (int): some meaningful index? not fleshed out yet
descr_only (bool): only description is useful
**kwargs: Description
"""
assert names is not None
if description is None:
description = ""
self.names = names
self.types = types
self.description = description
self.tag = tag
self.descr_only = descr_only
self.annotated = annotated
self.meta = kwargs
| 166
| 186
|
# -*- coding: utf-8 -*-
"""Docstring Parsers/Formatters"""
# TODO: break this module up into smaller pieces
import sys
import re
from textwrap import dedent
from collections import OrderedDict
from itertools import islice
from .autodocstring_logging import logger
PY3k = sys.version_info[0] == 3
if PY3k:
string_types = str,
else:
string_types = basestring, # pylint: disable=undefined-variable
def make_docstring_obj(docstr, default="google", template_order=False):
"""Detect docstring style and create a Docstring object
Parameters:
docstr (str): source docstring
default (str, class): 'google', 'numpy' or subclass
of Docstring
template_order (bool, optional): iff True, reorder the
sections to match the order they appear in the template
Returns:
subclass of Docstring
"""
typ = detect_style(docstr)
# default may be a style name (str) or a Docstring subclass; normalize it
# for logging and lookup (str has no __name__)
default_name = default if isinstance(default, string_types) else default.__name__
logger.info("[make_docstring_obj] from {} to {}"
"".format(typ.__name__ if typ is not None else None, default_name))
if typ is None:
if isinstance(default, string_types):
typ = STYLE_LOOKUP[default.lower()]
else:
typ = default
return typ(docstr, template_order=template_order)
def detect_style(docstr):
"""Detect docstr style from existing docstring
Parameters:
docstr (str): docstring whose style we want to know
Returns:
class: one of [GoogleDocstring, NumpyDocstring, None]; None
means no match
"""
docstr = dedent_docstr(docstr)
for c in STYLE_LOOKUP.values():
if c.detect_style(docstr):
return c
return None
def dedent_docstr(s, n=1):
"""Dedent all lines except first n lines
Args:
s (type): some text to dedent
n (int): number of lines to skip, (n == 0 is a normal dedent,
n == 1 is useful for whole docstrings)
"""
lines = s.splitlines(keepends=True)
if lines:
first_n_lines = "".join([l.lstrip(' \t') for l in lines[:n]])
dedented = dedent("".join(lines[n:]))
return first_n_lines + dedented
else:
return ""
def dedent_verbose(s, n=1):
new = dedent_docstr(s, n=n)
s_split = s.splitlines(keepends=True)
new_split = new.splitlines(keepends=True)
i, ind = 0, -1
for i in range(n, len(s_split)):
if s_split[i].strip():
ind = s_split[i].find(new_split[i])
break
if ind >= 0:
indent = s_split[i][:ind]
else:
indent = ""
return indent, new
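# Sketch (made-up input): besides dedenting, this reports the indent it
# removed, which the parsers stash in Parameter.meta['indent'] so the
# formatters can reproduce it later:
#
#     >>> dedent_verbose("    line one\n    line two\n", n=0)
#     ('    ', 'line one\nline two\n')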
def indent_docstr(s, indent, n=1, trim=True):
"""Add common indentation to all lines except first
Args:
s (str): docstring starting at indentation level 0
indent (str): text used for indentation, in practice
this will be the level of the declaration + 1
n (int): don't indent first n lines
trim (bool): trim whitespace (' \t') out of blank lines
Returns:
s with common indentation applied
"""
lines = s.splitlines(keepends=True)
for i in range(n, len(lines)):
if lines[i].strip() or not trim:
lines[i] = "{0}{1}".format(indent, lines[i])
else:
lines[i] = lines[i].strip(' \t')
return "".join(lines)
def count_leading_newlines(s):
"""count number of leading newlines
this includes newlines that are separated by other whitespace
"""
return s[:-len(s.lstrip())].count('\n')
def count_trailing_newlines(s):
"""count number of trailing newlines
this includes newlines that are separated by other whitespace
"""
return s[len(s.rstrip()):].count('\n')
def with_bounding_newlines(s, nleading=0, ntrailing=0, nl='\n'):
"""return s with at least # leading and # trailing newlines
this includes newlines that are separated by other whitespace
"""
return "{0}{1}{2}".format(nl * (nleading - count_leading_newlines(s)),
s,
nl * (ntrailing - count_trailing_newlines(s)))
def strip_newlines(s, nleading=0, ntrailing=0):
"""strip at most nleading and ntrailing newlines from s"""
for _ in range(nleading):
# check the two-char '\r\n' before the one-char '\n' (startswith is
# also safe on an empty string)
if s.lstrip(' \t').startswith('\r\n'):
s = s.lstrip(' \t')[2:]
elif s.lstrip(' \t').startswith('\n'):
s = s.lstrip(' \t')[1:]
for _ in range(ntrailing):
if s.rstrip(' \t')[-2:] == '\r\n':
s = s.rstrip(' \t')[:-2]
elif s.rstrip(' \t')[-1:] == '\n':
s = s.rstrip(' \t')[:-1]
return s
class Parameter(object):
""""""
names = None
types = None
description = None
tag = None
descr_only = None
meta = None
def __init__(self, names, types, description, tag=None, descr_only=False,
annotated=False, **kwargs):
"""
Args:
names (list): list of names
types (str): string describing data types
description (str): description text
tag (int): some meaningful index? not fleshed out yet
descr_only (bool): only description is useful
**kwargs: Description
"""
assert names is not None
if description is None:
description = ""
self.names = names
self.types = types
self.description = description
self.tag = tag
self.descr_only = descr_only
self.annotated = annotated
self.meta = kwargs
class Section(object):
""""""
ALIASES = {}
PARSERS = {}
is_formatted = None
args = None
args_parser = None
args_formatter = None
heading = None
alias = None
_text = None
section_indent = ""
indent = " "
meta = None
formatter_override = None
def __init__(self, heading, text="", indent=None, **kwargs):
"""
Args:
heading (str): heading of the section (should be title case)
text (str, optional): section text
indent (str, optional): used by some formatters
"""
self.heading = heading
self.alias = self.resolve_alias(heading)
if self.alias in self.PARSERS:
parser, formatter = self.PARSERS[self.alias]
self.args_parser = parser
self.args_formatter = formatter
self.is_formatted = True
else:
self.is_formatted = False
if indent is not None:
self.indent = indent
self.text = text
self.meta = kwargs
logger.debug("create section '{}' ({}) with args : '{}'".format(self.heading,
self.alias,
self.args))
@classmethod
def from_section(cls, sec):
new_sec = cls(sec.alias)
new_sec._text = sec._text # pylint: disable=protected-access
# when changing styles, the indentation should change to better fit
# the new style
# new_sec.section_indent = sec.section_indent
# new_sec.indent = sec.indent
if hasattr(sec, "args"):
new_sec.args = sec.args
return new_sec
@classmethod
def resolve_alias(cls, heading):
""""""
titled_heading = heading.title()
try:
return cls.ALIASES[titled_heading]
except KeyError:
return heading
@property
def text(self):
""""""
if self.formatter_override is not None:
s = self.formatter_override(self) # pylint: disable=not-callable
elif self.args_formatter is not None:
s = self.args_formatter(self)
else:
s = self._text
return s
@text.setter
def text(self, val):
""""""
val = strip_newlines(val, ntrailing=1)
if self.args_parser is not None:
self.args = self.args_parser(self, val)
else:
section_indent, self._text = dedent_verbose(val, n=0)
# don't overwrite section indent if val isn't indented
if section_indent:
self.section_indent = section_indent
class NapoleonSection(Section):
""""""
ALIASES = {"Args": "Parameters",
"Arguments": "Parameters",
"Deleted Args": "Deleted Parameters",
"Deleted Arguments": "Deleted Parameters",
"Other Args": "Other Parameters",
"Other Arguments": "Other Parameters",
"Keyword Args": "Keyword Arguments",
"Return": "Returns",
"Yield": "Yields",
"No Longer Returns": "No Longer Returned",
"No Longer Yields": "No Longer Yielded",
"Warnings": "Warning"
}
def is_return_section(self):
return self.heading and self.heading.lower() in ('return', 'returns',
'yield', 'yields')
def param_parser_common(self, text):
# NOTE: there will be some tricky business if there is a
# section break done by "resuming unindented text"
param_list = []
param_dict = OrderedDict()
text = dedent_docstr(text, 0)
_r = r"^\S[^\r\n]*(?:\n[^\S\n]+\S[^\r\n]*|\n)*"
param_blocks = re.findall(_r, text, re.MULTILINE)
for i, block in enumerate(param_blocks):
param = self.finalize_param(block, len(param_list))
param_list.append(param)
if self.is_return_section():
param.names = [", ".join(param.names)]
param_dict[i] = param
else:
for name in param.names:
param_dict[name] = param
return param_dict
class GoogleSection(NapoleonSection):
""""""
section_indent = " "
indent = " "
@staticmethod
def finalize_param(s, tag):
"""
Args:
s (type): Description
tag (int): index of param? not fleshed out yet
"""
meta = {}
_r = r"([^,\s]+(?:\s*,\s*[^,\s]+)*\s*)(?:\((.*)\))?\s*:\s*(.*)"
m = re.match(_r, s, re.DOTALL | re.MULTILINE)
if m:
names, typ, descr = m.groups()
names = [n.strip() for n in names.split(',')]
meta['indent'], descr = dedent_verbose(descr, n=1)
descr_only = False
else:
names = ["{0}".format(tag)]
typ = ""
descr = s
descr_only = True
return Parameter(names, typ, descr, tag=tag, descr_only=descr_only, **meta)
def param_parser(self, text):
logger.info("[GoogleSection] section '{}' starts parsing".format(self.alias))
return self.param_parser_common(text)
def param_formatter(self):
""""""
logger.info("[GoogleSection] section '{}' starts formatting".format(self.alias))
s = ""
for param in self.args.values():
if param.descr_only:
s += with_bounding_newlines(param.description, ntrailing=1)
else:
if len(param.names) > 1:
logger.warn("section '{}' : Google docstrings don't allow > 1 "
"parameter per description".format(self.alias))
p = "{0}".format(", ".join(param.names))
if param.types:
types = param.types.strip()
if types:
p = "{0} ({1})".format(p, types)
if param.description:
desc = indent_docstr(param.description,
param.meta.get("indent", self.indent))
p = "{0}: {1}".format(p, desc)
s += with_bounding_newlines(p, ntrailing=1)
return s
PARSERS = {"Parameters": (param_parser,
param_formatter),
"Other Parameters": (param_parser,
param_formatter),
"Deleted Parameters": (param_parser,
param_formatter),
"Keyword Arguments": (param_parser,
param_formatter),
"Attributes": (param_parser,
param_formatter),
"Deleted Attributes": (param_parser,
param_formatter),
"Raises": (param_parser,
param_formatter),
"No Longer Raises": (param_parser,
param_formatter),
"Returns": (param_parser,
param_formatter),
"Yields": (param_parser,
param_formatter),
"No Longer Returned": (param_parser,
param_formatter),
"No Longer Yielded": (param_parser,
param_formatter),
}
class NumpySection(NapoleonSection):
""""""
indent = " "
@staticmethod
def finalize_param(s, i):
meta = {}
_r = r"\s*([^,\s]+(?:\s*,\s*[^,\s]+)*)\s*(?::\s*(.*?))?[^\S\n]*?\n(\s+.*)"
m = re.match(_r, s, re.DOTALL)
if m:
names, typ, desc = m.groups()
# FIXME hack, name for numpy parameters is always a list of names
# to support the multiple parameters per description option in
# numpy docstrings
names = [n.strip() for n in names.split(',')]
meta['indent'], descr = dedent_verbose(desc, 0)
descr_only = False
else:
names = ["{0}".format(i)]
typ = ""
descr = s
descr_only = True
return Parameter(names, typ, descr, tag=i, descr_only=descr_only, **meta)
def param_parser(self, text):
logger.info("[NumpySection] section '{}' starts parsing".format(self.alias))
return self.param_parser_common(text)
def param_formatter(self):
""""""
# NOTE: there will be some tricky business if there is a
# section break done by "resuming unindented text"
logger.info("[NumpySection] section '{}' starts formatting".format(self.alias))
s = ""
# already_seen = {}
for param in self.args.values():
if param.descr_only:
s += with_bounding_newlines(param.description, ntrailing=1)
else:
p = "{0}".format(", ".join(param.names))
if param.types:
types = param.types.strip()
if types:
p = "{0} : {1}".format(p, param.types.strip())
p = with_bounding_newlines(p, ntrailing=1)
if param.description:
p += indent_docstr(param.description,
param.meta.get("indent", self.indent),
n=0)
s += with_bounding_newlines(p, ntrailing=1)
return s
PARSERS = {"Parameters": (param_parser,
param_formatter),
"Other Parameters": (param_parser,
param_formatter),
"Deleted Parameters": (param_parser,
param_formatter),
"Keyword Arguments": (param_parser,
param_formatter),
"Attributes": (param_parser,
param_formatter),
"Deleted Attributes": (param_parser,
param_formatter),
"Raises": (param_parser,
param_formatter),
"No Longer Raises": (param_parser,
param_formatter),
"Returns": (param_parser,
param_formatter),
"Yields": (param_parser,
param_formatter),
"No Longer Returned": (param_parser,
param_formatter),
"No Longer Yielded": (param_parser,
param_formatter),
}
class Docstring(object):
"""Handle parsing / modifying / writing docstrings"""
STYLE_NAME = "none"
SECTION_STYLE = Section
TEMPLATE = OrderedDict([("Summary", None)])
PREFERRED_PARAMS_ALIAS = "Args"
sections = None
trailing_newlines = None
def __init__(self, docstr, template_order=False):
"""
Parameters:
docstr (Docstring or str): some existing docstring
template_order (bool, optional): iff True, reorder the
sections to match the order they appear in the template
"""
if isinstance(docstr, Docstring):
self.sections = docstr.sections
self.trailing_newlines = docstr.trailing_newlines
if not isinstance(docstr, type(self)):
# fixme, this is kinda hacky
make_new_sec = self.SECTION_STYLE.from_section
for sec_name, sec in list(docstr.sections.items()):  # list(): entries may be deleted
# when the section should not exist
# i.e. when a section was generated, but isn't needed anymore
# e.g. when there isn't any exception raised
if sec:
docstr.sections[sec_name] = make_new_sec(sec)
else:
# deleting section that shouldn't be here
# including those generated with template_order=True
del docstr.sections[sec_name]
# ok, this way of changing indentation is a thunder hack
if "Parameters" in docstr.sections:
self.get_section("Parameters").heading = self.PREFERRED_PARAMS_ALIAS
for arg in self.get_section("Parameters").args.values():
arg.meta['indent'] = self.get_section("Parameters").indent
if "Returns" in docstr.sections:
for arg in self.get_section("Returns").args.values():
arg.meta['indent'] = self.get_section("Returns").indent
if "Yields" in docstr.sections:
for arg in self.get_section("Yields").args.values():
arg.meta['indent'] = self.get_section("Yields").indent
elif isinstance(docstr, string_types):
if template_order:
self.sections = self.TEMPLATE.copy()
else:
self.sections = OrderedDict()
self._parse(docstr)
def _parse(self, s):
"""Parse docstring into meta data
Parameters:
s (str): docstring
"""
raise NotImplementedError("_parse is an abstract method")
def format(self, top_indent):
"""Format docstring into a string
Parameters:
top_indent (str): indentation added to all but the first
lines
Returns:
str: properly formatted
"""
raise NotImplementedError("format is an abstract method")
def update_parameters(self, params):
""""""
raise NotImplementedError("update_parameters is an abstract method")
def update_return_type(self, ret_name, ret_type,
default_description="Description",
keyword="return"):
""""""
raise NotImplementedError("update_return_type is an abstract method")
def update_attributes(self, attribs, alpha_order=True):
"""
Args:
attribs (OrderedDict): Parameter objects keyed by their names
"""
raise NotImplementedError("update_attributes is an abstract method")
def update_exceptions(self, attribs, alpha_order=True):
"""
Args:
attribs (OrderedDict): Parameter objects keyed by their names
"""
raise NotImplementedError("update_exceptions is an abstract method")
def add_dummy_returns(self, name, typ, description):
raise NotImplementedError("add_dummy_returns is an abstract method")
def finalize_section(self, heading, text):
"""
Args:
heading (type): Description
text (type): Description
"""
section = self.SECTION_STYLE(heading, text)
self.sections[section.alias] = section
def get_section(self, section_name):
if section_name in self.sections:
return self.sections[section_name]
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
return self.sections[alias]
raise KeyError("Section '{0}' not found".format(section_name))
def pop_section(self, section_name):
if section_name in self.sections:
return self.sections.pop(section_name)
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
return self.sections.pop(alias)
raise KeyError("Section '{0}' not found".format(section_name))
def insert_section(self, section_name, section):
if section.heading != section_name:
section.heading = section_name
self.sections[section_name] = section
def section_exists(self, section_name):
"""returns True iff section exists, and was finalized"""
sec = None
if section_name in self.sections:
sec = self.sections[section_name]
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
sec = self.sections[alias]
if sec is not None:
return True
return False
class NapoleonDocstring(Docstring): # pylint: disable=abstract-method
"""Styles understood by napoleon, aka. Google/Numpy"""
STYLE_NAME = "napoleon"
TEMPLATE = OrderedDict([("Summary", None),
("Parameters", None),
("Keyword Arguments", None),
("Returns", None),
("Yields", None),
("No Longer Returned", None),
("No Longer Yielded", None),
("Other Parameters", None),
("Deleted Parameters", None),
("Attributes", None),
("Deleted Attributes", None),
("Methods", None),
("Raises", None),
("No Longer Raises", None),
("Warns", None),
("See Also", None),
("Warning", None),
("Note", None),
("Notes", None),
("References", None),
("Example", None),
("Examples", None),
])
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip()
def _parse(self, s):
"""
Args:
s (type): Description
"""
logger.info("[NapoleonDocstring] starts parsing text")
self.trailing_newlines = count_trailing_newlines(s)
s = dedent_docstr(s)
sec_starts = [(m.start(), m.end(), m.string[m.start():m.end()])
for m in re.finditer(self.SECTION_RE, s, re.MULTILINE)]
sec_starts.insert(0, (0, 0, "Summary"))
sec_starts.append((len(s), len(s), ""))
for current_sec, next_sec in zip(sec_starts[:-1], sec_starts[1:]):
sec_name = self._extract_section_name(current_sec[2])
sec_body = s[current_sec[1]:next_sec[0]]
self.finalize_section(sec_name, sec_body)
@staticmethod
def _format_section_text(heading, body):
raise NotImplementedError("This is an abstract method")
def format(self, top_indent):
"""
Args:
top_indent (type): Description
"""
logger.info("[NapoleonDocstring] starts formatting")
s = ""
if self.section_exists("Summary"):
sec_text = self.get_section("Summary").text
if sec_text.strip():
s += with_bounding_newlines(sec_text, nleading=0, ntrailing=1)
for _, section in islice(self.sections.items(), 1, None):
if section is None:
continue
sec_body = indent_docstr(section.text, section.section_indent, n=0)
sec_text = self._format_section_text(section.heading, sec_body)
s += with_bounding_newlines(sec_text, nleading=1, ntrailing=1)
if self.trailing_newlines:
s = with_bounding_newlines(s, ntrailing=self.trailing_newlines)
s = indent_docstr(s, top_indent)
return s
def _update_section(self, params, sec_name, sec_alias=None,
del_prefix="Deleted ", alpha_order=False,
other_sections=()):
"""Update section to add / remove params
As a failsafe, params that are removed are placed in a
"Deleted ..." section
Args:
params (OrderedDict): dict of Parameter objects
sec_name (str): generic section name
sec_alias (str): section name that appears in the docstring
del_prefix (str): prefix for section that holds params that
no longer exist.
alpha_order (bool): whether or not to alphabetically sort
the params
"""
if not sec_alias:
sec_alias = sec_name
if not self.section_exists(sec_name) and len(params) == 0:
return None
elif not self.section_exists(sec_name):
self.finalize_section(sec_alias, "")
# put together which other sections exist so we can use them to
# exclude params that exist in them
_other = []
for _secname in other_sections:
if self.section_exists(_secname):
_other.append(self.get_section(_secname))
other_sections = _other
if alpha_order:
sorted_params = OrderedDict()
for k in sorted(list(params.keys()), key=str.lower):
sorted_params[k] = params[k]
params = sorted_params
current_dict = self.get_section(sec_name).args
# go through params in the order of the function declaration
# and cherry-pick from current_dict if there's already a description
# for that parameter
tags_seen = dict()
new = OrderedDict()
for name, param in params.items():
if name in current_dict:
def_param = param
param = current_dict.pop(name)
if param.tag in tags_seen:
param = None
else:
tags_seen[param.tag] = True
# update the type if annotated
if def_param.annotated:
param.types = def_param.types
else:
# if param is in one of the 'other sections', then don't
# worry about it
for sec in other_sections:
if name in sec.args:
# update the type if annotated
if param.annotated:
sec.args[name].types = param.types
# now ignore it
param = None
if param:
new[name] = param
# add description only parameters back in
for key, param in list(current_dict.items()):  # list(): we pop() while iterating
if param.descr_only:
# param.description = '\n' + param.description
new[key] = current_dict.pop(key)
# not sure when this guy gets created
if '' in current_dict:
del current_dict['']
# go through params that are no longer in the arguments list and
# move them from the Parameters section of the docstring to the
# deleted parameters section
if len(current_dict):
del_sec_name = del_prefix + sec_name
del_sec_alias = del_prefix + sec_alias
logger.warn("killing parameters named: {}".format(current_dict.keys()))
# TODO: put a switch here for other behavior?
if not self.section_exists(self.SECTION_STYLE.resolve_alias(del_sec_name)):
self.finalize_section(del_sec_name, "")
deled_params = self.get_section(del_sec_name)
deleted_tags = dict()
for key, val in current_dict.items():
if key in deled_params.args:
logger.warn("Stronger Warning: Killing old deleted param: "
"'{0}'".format(key))
val.names.remove(key)
if val.tag in deleted_tags:
deleted_tags[val.tag].names.append(key)
else:
new_val = Parameter([key], val.types, val.description)
deleted_tags[val.tag] = new_val
deled_params.args[key] = new_val
if len(new) == 0:
self.sections[sec_name] = None
else:
self.sections[sec_name].args = new
def update_parameters(self, params):
"""
Args:
params (OrderedDict): Parameter objects keyed by their names
"""
logger.info("[NapoleonDocstring] update parameters")
other_sections = ['Other Parameters', 'Keyword Arguments']
self._update_section(params, "Parameters", self.PREFERRED_PARAMS_ALIAS,
other_sections=other_sections)
def update_return_type(self, ret_name, ret_type,
default_description="Description",
keyword="return", del_prefix="No Longer "):
""""""
logger.info("[NapoleonDocstring] update return type")
if keyword == "yield":
sec_name = "Yields"
elif keyword == "return":
sec_name = "Returns"
else:
logger.debug("Unknown return keyword: '{}'".format(keyword))
for std_ret_name in ("Yields", "Returns"):
if self.section_exists(std_ret_name):
del_sec_name = del_prefix + std_ret_name
del_sec_alias = self.SECTION_STYLE.resolve_alias(del_sec_name)
if not self.section_exists(del_sec_alias):
self.finalize_section(del_sec_alias, "")
del_sec = self.get_section(del_sec_alias)
sec = self.pop_section(std_ret_name)
del_sec.args = sec.args
return
if not self.section_exists(sec_name):
# see if a section exists from another keyword, ie, maybe
# this function used to return, but now it yields
for std_ret_name in ("Yields", "Returns"):
if self.section_exists(std_ret_name):
# necessary to recreate the section completely
# in order to use the right parser and formatter
logger.debug("old return section exists : '{}'".format(std_ret_name))
old_sec = self.pop_section(std_ret_name)
self.finalize_section(sec_name, "")
new_sec = self.get_section(sec_name)
new_sec.args = old_sec.args
self.insert_section(sec_name, new_sec)
break
if self.section_exists(sec_name):
sec = self.get_section(sec_name)
if sec.args and ret_type:
p0 = next(iter(sec.args.values()))
if p0.descr_only:
p0.description = ret_type
elif p0.types:
p0.types = ret_type
elif p0.names:
p0.names = [ret_type]
elif ret_name or ret_type:
description = default_description
sec.args = OrderedDict()
if ret_name:
sec.args[ret_name] = Parameter([ret_name], ret_type, description)
else:
sec.args[ret_type] = Parameter([ret_type], "", description)
else:
# and i ask myself, how did i get here?
pass
else:
self.finalize_section(sec_name, "")
sec = self.get_section(sec_name)
ret_type = ret_type if ret_type != "" else "${NUMBER:TYPE}"
sec.args = OrderedDict()
sec.args[ret_type] = Parameter([ret_type], "", default_description)
def update_attributes(self, attribs, alpha_order=True):
"""
Args:
attribs (OrderedDict): Parameter objects keyed by their names
"""
logger.info("[NapoleonDocstring] update attributes")
self._update_section(attribs, "Attributes", alpha_order=alpha_order)
def update_exceptions(self, attribs, alpha_order=True):
"""
Args:
attribs (OrderedDict): Parameter objects keyed by their names
"""
logger.info("[NapoleonDocstring] update exceptions")
self._update_section(attribs, "Raises", del_prefix="No Longer ",
alpha_order=alpha_order)
def add_dummy_returns(self, name, typ, description):
# No longer used??
if not self.section_exists("Returns"):
sec = self.SECTION_STYLE("Returns")
if name:
sec.args = {name: Parameter([name], typ, description)}
else:
sec.args = {typ: Parameter([typ], "", description)}
self.sections["Returns"] = sec
class GoogleDocstring(NapoleonDocstring):
""""""
STYLE_NAME = "google"
SECTION_STYLE = GoogleSection
SECTION_RE = r"^[A-Za-z0-9][A-Za-z0-9 \t]*:\s*$\r?\n?"
PREFERRED_PARAMS_ALIAS = "Args"
@classmethod
def detect_style(cls, docstr):
""""""
m = re.search(cls.SECTION_RE, docstr, re.MULTILINE)
return m is not None
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip().rstrip(':').rstrip()
@staticmethod
def _format_section_text(heading, body):
return "{0}:\n{1}".format(heading, body)
class NumpyDocstring(NapoleonDocstring):
""""""
STYLE_NAME = "numpy"
SECTION_STYLE = NumpySection
SECTION_RE = r"^([A-Za-z0-9][A-Za-z0-9 \t]*)\s*\n-+\s*?$\r?\n?"
PREFERRED_PARAMS_ALIAS = "Parameters"
@classmethod
def detect_style(cls, docstr):
""""""
m = re.search(cls.SECTION_RE, docstr, re.MULTILINE)
return m is not None
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip().rstrip('-').rstrip()
@staticmethod
def _format_section_text(heading, body):
return "{0}\n{1}\n{2}".format(heading, "-" * len(heading), body)
STYLE_LOOKUP = OrderedDict([('numpy', NumpyDocstring),
('google', GoogleDocstring)])
##
## EOF
##
|
__init__
|
Args:
heading (str): heading of the section (should be title case)
text (str, optional): section text
indent (str, optional): used by some formatters
|
# -*- coding: utf-8 -*-
"""Docstring Parsers/Formatters"""
# TODO: break this module up into smaller pieces
import sys
import re
from textwrap import dedent
from collections import OrderedDict
from itertools import islice
from .autodocstring_logging import logger
PY3k = sys.version_info[0] == 3
if PY3k:
string_types = str,
else:
string_types = basestring, # pylint: disable=undefined-variable
def make_docstring_obj(docstr, default="google", template_order=False):
"""Detect docstring style and create a Docstring object
Parameters:
docstr (str): source docstring
default (str, class): 'google', 'numpy' or subclass
of Docstring
template_order (bool, optional): iff True, reorder the
sections to match the order they appear in the template
Returns:
subclass of Docstring
"""
typ = detect_style(docstr)
# default may be a style name (str) or a Docstring subclass; normalize it
# for logging and lookup (str has no __name__)
default_name = default if isinstance(default, string_types) else default.__name__
logger.info("[make_docstring_obj] from {} to {}"
"".format(typ.__name__ if typ is not None else None, default_name))
if typ is None:
if isinstance(default, string_types):
typ = STYLE_LOOKUP[default.lower()]
else:
typ = default
return typ(docstr, template_order=template_order)
def detect_style(docstr):
"""Detect docstr style from existing docstring
Parameters:
docstr (str): docstring whose style we want to know
Returns:
class: one of [GoogleDocstring, NumpyDocstring, None]; None
means no match
"""
docstr = dedent_docstr(docstr)
for c in STYLE_LOOKUP.values():
if c.detect_style(docstr):
return c
return None
def dedent_docstr(s, n=1):
"""Dedent all lines except first n lines
Args:
s (type): some text to dedent
n (int): number of lines to skip, (n == 0 is a normal dedent,
n == 1 is useful for whole docstrings)
"""
lines = s.splitlines(keepends=True)
if lines:
first_n_lines = "".join([l.lstrip(' \t') for l in lines[:n]])
dedented = dedent("".join(lines[n:]))
return first_n_lines + dedented
else:
return ""
def dedent_verbose(s, n=1):
new = dedent_docstr(s, n=n)
s_split = s.splitlines(keepends=True)
new_split = new.splitlines(keepends=True)
i, ind = 0, -1
for i in range(n, len(s_split)):
if s_split[i].strip():
ind = s_split[i].find(new_split[i])
break
if ind >= 0:
indent = s_split[i][:ind]
else:
indent = ""
return indent, new
def indent_docstr(s, indent, n=1, trim=True):
"""Add common indentation to all lines except first
Args:
s (str): docstring starting at indentation level 0
indent (str): text used for indentation, in practice
this will be the level of the declaration + 1
n (int): don't indent first n lines
trim (bool): trim whitespace (' \t') out of blank lines
Returns:
s with common indentation applied
"""
lines = s.splitlines(keepends=True)
for i in range(n, len(lines)):
if lines[i].strip() or not trim:
lines[i] = "{0}{1}".format(indent, lines[i])
else:
lines[i] = lines[i].strip(' \t')
return "".join(lines)
def count_leading_newlines(s):
"""count number of leading newlines
this includes newlines that are separated by other whitespace
"""
return s[:-len(s.lstrip())].count('\n')
def count_trailing_newlines(s):
"""count number of trailing newlines
this includes newlines that are separated by other whitespace
"""
return s[len(s.rstrip()):].count('\n')
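# Sketch (made-up input): newlines separated by other whitespace still
# count, which matters when preserving a docstring's trailing blank lines:
#
#     >>> count_trailing_newlines("text\n  \n")
#     2
#     >>> count_leading_newlines(" \n\ntext")
#     2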
def with_bounding_newlines(s, nleading=0, ntrailing=0, nl='\n'):
"""return s with at least # leading and # trailing newlines
this includes newlines that are separated by other whitespace
"""
return "{0}{1}{2}".format(nl * (nleading - count_leading_newlines(s)),
s,
nl * (ntrailing - count_trailing_newlines(s)))
def strip_newlines(s, nleading=0, ntrailing=0):
"""strip at most nleading and ntrailing newlines from s"""
for _ in range(nleading):
# check the two-char '\r\n' before the one-char '\n' (startswith is
# also safe on an empty string)
if s.lstrip(' \t').startswith('\r\n'):
s = s.lstrip(' \t')[2:]
elif s.lstrip(' \t').startswith('\n'):
s = s.lstrip(' \t')[1:]
for _ in range(ntrailing):
if s.rstrip(' \t')[-2:] == '\r\n':
s = s.rstrip(' \t')[:-2]
elif s.rstrip(' \t')[-1:] == '\n':
s = s.rstrip(' \t')[:-1]
return s
class Parameter(object):
""""""
names = None
types = None
description = None
tag = None
descr_only = None
meta = None
def __init__(self, names, types, description, tag=None, descr_only=False,
annotated=False, **kwargs):
"""
Args:
names (list): list of names
types (str): string describing data types
description (str): description text
tag (int): some meaningful index? not fleshed out yet
descr_only (bool): only description is useful
**kwargs: Description
"""
assert names is not None
if description is None:
description = ""
self.names = names
self.types = types
self.description = description
self.tag = tag
self.descr_only = descr_only
self.annotated = annotated
self.meta = kwargs
class Section(object):
""""""
ALIASES = {}
PARSERS = {}
is_formatted = None
args = None
args_parser = None
args_formatter = None
heading = None
alias = None
_text = None
section_indent = ""
indent = " "
meta = None
formatter_override = None
# MASKED: __init__ function (lines 208-234)
@classmethod
def from_section(cls, sec):
new_sec = cls(sec.alias)
new_sec._text = sec._text # pylint: disable=protected-access
# when changing styles, the indentation should change to better fit
# the new style
# new_sec.section_indent = sec.section_indent
# new_sec.indent = sec.indent
if hasattr(sec, "args"):
new_sec.args = sec.args
return new_sec
@classmethod
def resolve_alias(cls, heading):
""""""
titled_heading = heading.title()
try:
return cls.ALIASES[titled_heading]
except KeyError:
return heading
@property
def text(self):
""""""
if self.formatter_override is not None:
s = self.formatter_override(self) # pylint: disable=not-callable
elif self.args_formatter is not None:
s = self.args_formatter(self)
else:
s = self._text
return s
@text.setter
def text(self, val):
""""""
val = strip_newlines(val, ntrailing=1)
if self.args_parser is not None:
self.args = self.args_parser(self, val)
else:
section_indent, self._text = dedent_verbose(val, n=0)
# don't overwrite section indent if val isn't indented
if section_indent:
self.section_indent = section_indent
class NapoleonSection(Section):
""""""
ALIASES = {"Args": "Parameters",
"Arguments": "Parameters",
"Deleted Args": "Deleted Parameters",
"Deleted Arguments": "Deleted Parameters",
"Other Args": "Other Parameters",
"Other Arguments": "Other Parameters",
"Keyword Args": "Keyword Arguments",
"Return": "Returns",
"Yield": "Yields",
"No Longer Returns": "No Longer Returned",
"No Longer Yields": "No Longer Yielded",
"Warnings": "Warning"
}
def is_return_section(self):
return self.heading and self.heading.lower() in ('return', 'returns',
'yield', 'yields')
def param_parser_common(self, text):
# NOTE: there will be some tricky business if there is a
# section break done by "resuming unindented text"
param_list = []
param_dict = OrderedDict()
text = dedent_docstr(text, 0)
_r = r"^\S[^\r\n]*(?:\n[^\S\n]+\S[^\r\n]*|\n)*"
param_blocks = re.findall(_r, text, re.MULTILINE)
for i, block in enumerate(param_blocks):
param = self.finalize_param(block, len(param_list))
param_list.append(param)
if self.is_return_section():
param.names = [", ".join(param.names)]
param_dict[i] = param
else:
for name in param.names:
param_dict[name] = param
return param_dict
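# Sketch of how the block regex above splits a section body: each match is
# one parameter block, including its indented continuation lines.
# >>> _r = r"^\S[^\r\n]*(?:\n[^\S\n]+\S[^\r\n]*|\n)*"
# >>> re.findall(_r, "x (int): first\ny (str): second\n    more\n", re.MULTILINE)
# ['x (int): first\n', 'y (str): second\n    more\n']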
class GoogleSection(NapoleonSection):
""""""
section_indent = " "
indent = " "
@staticmethod
def finalize_param(s, tag):
"""
Args:
s (type): Description
tag (int): index of param? not fleshed out yet
"""
meta = {}
_r = r"([^,\s]+(?:\s*,\s*[^,\s]+)*\s*)(?:\((.*)\))?\s*:\s*(.*)"
m = re.match(_r, s, re.DOTALL | re.MULTILINE)
if m:
names, typ, descr = m.groups()
names = [n.strip() for n in names.split(',')]
meta['indent'], descr = dedent_verbose(descr, n=1)
descr_only = False
else:
names = ["{0}".format(tag)]
typ = ""
descr = s
descr_only = True
return Parameter(names, typ, descr, tag=tag, descr_only=descr_only, **meta)
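# Doctest-style sketch of the Google-style match above:
# >>> p = GoogleSection.finalize_param("x (int): Description", 0)
# >>> p.names, p.types, p.description
# (['x'], 'int', 'Description')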
def param_parser(self, text):
logger.info("[GoogleSection] section '{}' starts parsing".format(self.alias))
return self.param_parser_common(text)
def param_formatter(self):
""""""
logger.info("[GoogleSection] section '{}' starts formatting".format(self.alias))
s = ""
for param in self.args.values():
if param.descr_only:
s += with_bounding_newlines(param.description, ntrailing=1)
else:
if len(param.names) > 1:
logger.warn("section '{}' : Google docstrings don't allow > 1 "
"parameter per description".format(self.alias))
p = "{0}".format(", ".join(param.names))
if param.types:
types = param.types.strip()
if types:
p = "{0} ({1})".format(p, types)
if param.description:
desc = indent_docstr(param.description,
param.meta.get("indent", self.indent))
p = "{0}: {1}".format(p, desc)
s += with_bounding_newlines(p, ntrailing=1)
return s
PARSERS = {"Parameters": (param_parser,
param_formatter),
"Other Parameters": (param_parser,
param_formatter),
"Deleted Parameters": (param_parser,
param_formatter),
"Keyword Arguments": (param_parser,
param_formatter),
"Attributes": (param_parser,
param_formatter),
"Deleted Attributes": (param_parser,
param_formatter),
"Raises": (param_parser,
param_formatter),
"No Longer Raises": (param_parser,
param_formatter),
"Returns": (param_parser,
param_formatter),
"Yields": (param_parser,
param_formatter),
"No Longer Returned": (param_parser,
param_formatter),
"No Longer Yielded": (param_parser,
param_formatter),
}
class NumpySection(NapoleonSection):
""""""
indent = " "
@staticmethod
def finalize_param(s, i):
meta = {}
_r = r"\s*([^,\s]+(?:\s*,\s*[^,\s]+)*)\s*(?::\s*(.*?))?[^\S\n]*?\n(\s+.*)"
m = re.match(_r, s, re.DOTALL)
if m:
names, typ, desc = m.groups()
# FIXME hack, name for numpy parameters is always a list of names
# to support the multiple parameters per description option in
# numpy docstrings
names = [n.strip() for n in names.split(',')]
meta['indent'], descr = dedent_verbose(desc, 0)
descr_only = False
else:
names = ["{0}".format(i)]
typ = ""
descr = s
descr_only = True
return Parameter(names, typ, descr, tag=i, descr_only=descr_only, **meta)
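# Doctest-style sketch of the numpy-style match above; the indented
# description keeps its trailing newline.
# >>> p = NumpySection.finalize_param("x : int\n    Description\n", 0)
# >>> p.names, p.types, p.description
# (['x'], 'int', 'Description\n')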
def param_parser(self, text):
logger.info("[NumpySection] section '{}' starts parsing".format(self.alias))
return self.param_parser_common(text)
def param_formatter(self):
""""""
# NOTE: there will be some tricky business if there is a
# section break done by "resuming unindented text"
logger.info("[NumpySection] section '{}' starts formatting".format(self.alias))
s = ""
# already_seen = {}
for param in self.args.values():
if param.descr_only:
s += with_bounding_newlines(param.description, ntrailing=1)
else:
p = "{0}".format(", ".join(param.names))
if param.types:
types = param.types.strip()
if types:
p = "{0} : {1}".format(p, param.types.strip())
p = with_bounding_newlines(p, ntrailing=1)
if param.description:
p += indent_docstr(param.description,
param.meta.get("indent", self.indent),
n=0)
s += with_bounding_newlines(p, ntrailing=1)
return s
PARSERS = {"Parameters": (param_parser,
param_formatter),
"Other Parameters": (param_parser,
param_formatter),
"Deleted Parameters": (param_parser,
param_formatter),
"Keyword Arguments": (param_parser,
param_formatter),
"Attributes": (param_parser,
param_formatter),
"Deleted Attributes": (param_parser,
param_formatter),
"Raises": (param_parser,
param_formatter),
"No Longer Raises": (param_parser,
param_formatter),
"Returns": (param_parser,
param_formatter),
"Yields": (param_parser,
param_formatter),
"No Longer Returned": (param_parser,
param_formatter),
"No Longer Yielded": (param_parser,
param_formatter),
}
class Docstring(object):
"""Handle parsing / modifying / writing docstrings"""
STYLE_NAME = "none"
SECTION_STYLE = Section
TEMPLATE = OrderedDict([("Summary", None)])
PREFERRED_PARAMS_ALIAS = "Args"
sections = None
trailing_newlines = None
def __init__(self, docstr, template_order=False):
"""
Parameters:
docstr (Docstring or str): some existing docstring
template_order (bool, optional): iff True, reorder the
sections to match the order they appear in the template
"""
if isinstance(docstr, Docstring):
self.sections = docstr.sections
self.trailing_newlines = docstr.trailing_newlines
if not isinstance(docstr, type(self)):
# fixme, this is kinda hacky
make_new_sec = self.SECTION_STYLE.from_section
for sec_name, sec in list(docstr.sections.items()):
# when the section should not exist
# i.e. when a section was generated, but isn't needed anymore
# e.g. when there isn't any exception raised
if sec:
docstr.sections[sec_name] = make_new_sec(sec)
else:
# deleting section that shouldn't be here
# including those generated with template_order=True
del docstr.sections[sec_name]
# ok, this way of changing indentation is a thunder hack
if "Parameters" in docstr.sections:
self.get_section("Parameters").heading = self.PREFERRED_PARAMS_ALIAS
for arg in self.get_section("Parameters").args.values():
arg.meta['indent'] = self.get_section("Parameters").indent
if "Returns" in docstr.sections:
for arg in self.get_section("Returns").args.values():
arg.meta['indent'] = self.get_section("Returns").indent
if "Yields" in docstr.sections:
for arg in self.get_section("Yields").args.values():
arg.meta['indent'] = self.get_section("Yields").indent
elif isinstance(docstr, string_types):
if template_order:
self.sections = self.TEMPLATE.copy()
else:
self.sections = OrderedDict()
self._parse(docstr)
def _parse(self, s):
"""Parse docstring into meta data
Parameters:
s (str): docstring
"""
raise NotImplementedError("_parse is an abstract method")
def format(self, top_indent):
"""Format docstring into a string
Parameters:
top_indent (str): indentation added to all but the first
line
Returns:
str: properly formatted
"""
raise NotImplementedError("format is an abstract method")
def update_parameters(self, params):
""""""
raise NotImplementedError("update_parameters is an abstract method")
def update_return_type(self, ret_name, ret_type,
default_description="Description",
keyword="return"):
""""""
raise NotImplementedError("update_return_type is an abstract method")
def update_attributes(self, attribs, alpha_order=True):
"""
Args:
attribs (OrderedDict): Parameter objects keyed by their names
"""
raise NotImplementedError("update_attributes is an abstract method")
def update_exceptions(self, attribs, alpha_order=True):
"""
Args:
attribs (OrderedDict): Parameter objects keyed by their names
"""
raise NotImplementedError("update_exceptions is an abstract method")
def add_dummy_returns(self, name, typ, description):
raise NotImplementedError("add_dummy_returns is an abstract method")
def finalize_section(self, heading, text):
"""
Args:
heading (type): Description
text (type): Description
"""
section = self.SECTION_STYLE(heading, text)
self.sections[section.alias] = section
def get_section(self, section_name):
if section_name in self.sections:
return self.sections[section_name]
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
return self.sections[alias]
raise KeyError("Section '{0}' not found".format(section_name))
def pop_section(self, section_name):
if section_name in self.sections:
return self.sections.pop(section_name)
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
return self.sections.pop(alias)
raise KeyError("Section '{0}' not found".format(section_name))
def insert_section(self, section_name, section):
if section.heading != section_name:
section.heading = section_name
self.sections[section_name] = section
def section_exists(self, section_name):
"""returns True iff section exists, and was finalized"""
sec = None
if section_name in self.sections:
sec = self.sections[section_name]
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
sec = self.sections[alias]
if sec is not None:
return True
return False
class NapoleonDocstring(Docstring): # pylint: disable=abstract-method
"""Styles understood by napoleon, aka. Google/Numpy"""
STYLE_NAME = "napoleon"
TEMPLATE = OrderedDict([("Summary", None),
("Parameters", None),
("Keyword Arguments", None),
("Returns", None),
("Yields", None),
("No Longer Returned", None),
("No Longer Yielded", None),
("Other Parameters", None),
("Deleted Parameters", None),
("Attributes", None),
("Deleted Attributes", None),
("Methods", None),
("Raises", None),
("No Longer Raises", None),
("Warns", None),
("See Also", None),
("Warning", None),
("Note", None),
("Notes", None),
("References", None),
("Example", None),
("Examples", None),
])
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip()
def _parse(self, s):
"""
Args:
s (type): Description
"""
logger.info("[NapoleonDocstring] starts parsing text")
self.trailing_newlines = count_trailing_newlines(s)
s = dedent_docstr(s)
sec_starts = [(m.start(), m.end(), m.string[m.start():m.end()])
for m in re.finditer(self.SECTION_RE, s, re.MULTILINE)]
sec_starts.insert(0, (0, 0, "Summary"))
sec_starts.append((len(s), len(s), ""))
for current_sec, next_sec in zip(sec_starts[:-1], sec_starts[1:]):
sec_name = self._extract_section_name(current_sec[2])
sec_body = s[current_sec[1]:next_sec[0]]
self.finalize_section(sec_name, sec_body)
@staticmethod
def _format_section_text(heading, body):
raise NotImplementedError("This is an abstract method")
def format(self, top_indent):
"""
Args:
top_indent (type): Description
"""
logger.info("[NapoleonDocstring] starts formatting")
s = ""
if self.section_exists("Summary"):
sec_text = self.get_section("Summary").text
if sec_text.strip():
s += with_bounding_newlines(sec_text, nleading=0, ntrailing=1)
for _, section in islice(self.sections.items(), 1, None):
if section is None:
continue
sec_body = indent_docstr(section.text, section.section_indent, n=0)
sec_text = self._format_section_text(section.heading, sec_body)
s += with_bounding_newlines(sec_text, nleading=1, ntrailing=1)
if self.trailing_newlines:
s = with_bounding_newlines(s, ntrailing=self.trailing_newlines)
s = indent_docstr(s, top_indent)
return s
def _update_section(self, params, sec_name, sec_alias=None,
del_prefix="Deleted ", alpha_order=False,
other_sections=()):
"""Update section to add / remove params
As a failsafe, params that are removed are placed in a
"Deleted ..." section
Args:
params (OrderedDict): dict of Parameter objects
sec_name (str): generic section name
sec_alias (str): section name that appears in the docstring
del_prefix (str): prefix for section that holds params that
no longer exist.
alpha_order (bool): whether or not to alphabetically sort
the params
"""
if not sec_alias:
sec_alias = sec_name
if not self.section_exists(sec_name) and len(params) == 0:
return None
elif not self.section_exists(sec_name):
self.finalize_section(sec_alias, "")
# put together which other sections exist so we can use them to
# exclude params that exist in them
_other = []
for _secname in other_sections:
if self.section_exists(_secname):
_other.append(self.get_section(_secname))
other_sections = _other
if alpha_order:
sorted_params = OrderedDict()
for k in sorted(list(params.keys()), key=str.lower):
sorted_params[k] = params[k]
params = sorted_params
current_dict = self.get_section(sec_name).args
# go through params in the order of the function declaration
# and cherry-pick from current_dict if there's already a description
# for that parameter
tags_seen = dict()
new = OrderedDict()
for name, param in params.items():
if name in current_dict:
def_param = param
param = current_dict.pop(name)
if param.tag in tags_seen:
param = None
else:
tags_seen[param.tag] = True
# update the type if annotated
if def_param.annotated:
param.types = def_param.types
else:
# if param is in one of the 'other sections', then don't
# worry about it
for sec in other_sections:
if name in sec.args:
# update the type if annotated
if param.annotated:
sec.args[name].types = param.types
# now ignore it
param = None
if param:
new[name] = param
# add description only parameters back in
for key, param in list(current_dict.items()):
if param.descr_only:
# param.description = '\n' + param.description
new[key] = current_dict.pop(key)
# not sure when this guy gets created
if '' in current_dict:
del current_dict['']
# go through params that are no longer in the arguments list and
# move them from the Parameters section of the docstring to the
# deleted parameters section
if len(current_dict):
del_sec_name = del_prefix + sec_name
del_sec_alias = del_prefix + sec_alias
logger.warn("killing parameters named: {}".format(current_dict.keys()))
# TODO: put a switch here for other bahavior?
if not self.section_exists(self.SECTION_STYLE.resolve_alias(del_sec_name)):
self.finalize_section(del_sec_name, "")
deled_params = self.get_section(del_sec_name)
deleted_tags = dict()
for key, val in current_dict.items():
if key in deled_params.args:
logger.warn("Stronger Warning: Killing old deleted param: "
"'{0}'".format(key))
val.names.remove(key)
if val.tag in deleted_tags:
deleted_tags[val.tag].names.append(key)
else:
new_val = Parameter([key], val.types, val.description)
deleted_tags[val.tag] = new_val
deled_params.args[key] = new_val
if len(new) == 0:
self.sections[sec_name] = None
else:
self.sections[sec_name].args = new
def update_parameters(self, params):
"""
Args:
params (OrderedDict): Parameter objects keyed by their names
"""
logger.info("[NapoleonDocstring] update parameters")
other_sections = ['Other Parameters', 'Keyword Arguments']
self._update_section(params, "Parameters", self.PREFERRED_PARAMS_ALIAS,
other_sections=other_sections)
def update_return_type(self, ret_name, ret_type,
default_description="Description",
keyword="return", del_prefix="No Longer "):
""""""
logger.info("[NapoleonDocstring] update return type")
if keyword == "yield":
sec_name = "Yields"
elif keyword == "return":
sec_name = "Returns"
else:
logger.debug("Unknown return keyword: '{}'".format(keyword))
for std_ret_name in ("Yields", "Returns"):
if self.section_exists(std_ret_name):
del_sec_name = del_prefix + std_ret_name
del_sec_alias = self.SECTION_STYLE.resolve_alias(del_sec_name)
if not self.section_exists(del_sec_alias):
self.finalize_section(del_sec_alias, "")
del_sec = self.get_section(del_sec_alias)
sec = self.pop_section(std_ret_name)
del_sec.args = sec.args
return
if not self.section_exists(sec_name):
# see if a section exists from another keyword, ie, maybe
# this function used to return, but now it yields
for std_ret_name in ("Yields", "Returns"):
if self.section_exists(std_ret_name):
# necessary to completely recreate the section
# in order to use the right parser and formatter
logger.debug("old return section exists : '{}'".format(std_ret_name))
old_sec = self.pop_section(std_ret_name)
self.finalize_section(sec_name, "")
new_sec = self.get_section(sec_name)
new_sec.args = old_sec.args
self.insert_section(sec_name, new_sec)
break
if self.section_exists(sec_name):
sec = self.get_section(sec_name)
if sec.args and ret_type:
p0 = next(iter(sec.args.values()))
if p0.descr_only:
p0.description = ret_type
elif p0.types:
p0.types = ret_type
elif p0.names:
p0.names = [ret_type]
elif ret_name or ret_type:
description = default_description
sec.args = OrderedDict()
if ret_name:
sec.args[ret_name] = Parameter([ret_name], ret_type, description)
else:
sec.args[ret_type] = Parameter([ret_type], "", description)
else:
# and i ask myself, how did i get here?
pass
else:
self.finalize_section(sec_name, "")
sec = self.get_section(sec_name)
ret_type = ret_type if ret_type != "" else "${NUMBER:TYPE}"
sec.args = OrderedDict()
sec.args[ret_type] = Parameter([ret_type], "", default_description)
def update_attributes(self, attribs, alpha_order=True):
"""
Args:
attribs (OrderedDict): Parameter objects keyed by their names
"""
logger.info("[NapoleonDocstring] update attributes")
self._update_section(attribs, "Attributes", alpha_order=alpha_order)
def update_exceptions(self, attribs, alpha_order=True):
"""
Args:
attribs (OrderedDict): Parameter objects keyed by their names
"""
logger.info("[NapoleonDocstring] update exceptions")
self._update_section(attribs, "Raises", del_prefix="No Longer ",
alpha_order=alpha_order)
def add_dummy_returns(self, name, typ, description):
# No longer used??
if not self.section_exists("Returns"):
sec = self.SECTION_STYLE("Returns")
if name:
sec.args = {name: Parameter([name], typ, description)}
else:
sec.args = {typ: Parameter([typ], "", description)}
self.sections["Returns"] = sec
class GoogleDocstring(NapoleonDocstring):
""""""
STYLE_NAME = "google"
SECTION_STYLE = GoogleSection
SECTION_RE = r"^[A-Za-z0-9][A-Za-z0-9 \t]*:\s*$\r?\n?"
PREFERRED_PARAMS_ALIAS = "Args"
@classmethod
def detect_style(cls, docstr):
""""""
m = re.search(cls.SECTION_RE, docstr, re.MULTILINE)
return m is not None
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip().rstrip(':').rstrip()
@staticmethod
def _format_section_text(heading, body):
return "{0}:\n{1}".format(heading, body)
class NumpyDocstring(NapoleonDocstring):
""""""
STYLE_NAME = "numpy"
SECTION_STYLE = NumpySection
SECTION_RE = r"^([A-Za-z0-9][A-Za-z0-9 \t]*)\s*\n-+\s*?$\r?\n?"
PREFERRED_PARAMS_ALIAS = "Parameters"
@classmethod
def detect_style(cls, docstr):
""""""
m = re.search(cls.SECTION_RE, docstr, re.MULTILINE)
return m is not None
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip().rstrip('-').rstrip()
@staticmethod
def _format_section_text(heading, body):
return "{0}\n{1}\n{2}".format(heading, "-" * len(heading), body)
STYLE_LOOKUP = OrderedDict([('numpy', NumpyDocstring),
('google', GoogleDocstring)])
##
## EOF
##
|
def __init__(self, heading, text="", indent=None, **kwargs):
"""
Args:
heading (str): heading of the section (should be title case)
text (str, optional): section text
indent (str, optional): used by some formatters
"""
self.heading = heading
self.alias = self.resolve_alias(heading)
if self.alias in self.PARSERS:
parser, formatter = self.PARSERS[self.alias]
self.args_parser = parser
self.args_formatter = formatter
self.is_formatted = True
else:
self.is_formatted = False
if indent is not None:
self.indent = indent
self.text = text
self.meta = kwargs
logger.debug("create section '{}' ({}) with args : '{}'".format(self.heading,
self.alias,
self.args))
| 208
| 234
|
# -*- coding: utf-8 -*-
"""Docstring Parsers/Formatters"""
# TODO: break this module up into smaller pieces
import sys
import re
from textwrap import dedent
from collections import OrderedDict
from itertools import islice
from .autodocstring_logging import logger
PY3k = sys.version_info[0] == 3
if PY3k:
string_types = str,
else:
string_types = basestring, # pylint: disable=undefined-variable
def make_docstring_obj(docstr, default="google", template_order=False):
"""Detect docstring style and create a Docstring object
Parameters:
docstr (str): source docstring
default (str, class): 'google', 'numpy' or subclass
of Docstring
template_order (bool, optional): iff True, reorder the
sections to match the order they appear in the template
Returns:
subclass of Docstring
"""
typ = detect_style(docstr)
default_name = default if isinstance(default, string_types) else default.__name__
logger.info("[make_docstring_obj] from {} to {}"
"".format(typ.__name__ if typ is not None else None, default_name))
if typ is None:
if issubclass(default, Docstring):
typ = default
else:
typ = STYLE_LOOKUP[default.lower()]
return typ(docstr, template_order=template_order)
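# Hypothetical usage sketch: detect the style of an existing docstring and
# re-render it in another style by passing the object to the target class.
# >>> doc = make_docstring_obj("Args:\n    x (int): Desc\n")
# >>> isinstance(doc, GoogleDocstring)
# True
# >>> numpy_doc = NumpyDocstring(doc)  # same sections, numpy formatting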
def detect_style(docstr):
"""Detect docstr style from existing docstring
Parameters:
docstr (str): docstring whose style we want to know
Returns:
class: one of [GoogleDocstring, NumpyDocstring, None]; None
means no match
"""
docstr = dedent_docstr(docstr)
for c in STYLE_LOOKUP.values():
if c.detect_style(docstr):
return c
return None
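# Doctest-style sketch: a "Heading:" line matches the Google regex, while
# text without any section heading matches neither style.
# >>> detect_style("Args:\n    x (int): Desc\n") is GoogleDocstring
# True
# >>> detect_style("no sections here") is None
# True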
def dedent_docstr(s, n=1):
"""Dedent all lines except first n lines
Args:
s (type): some text to dedent
n (int): number of lines to skip, (n == 0 is a normal dedent,
n == 1 is useful for whole docstrings)
"""
lines = s.splitlines(keepends=True)
if lines:
first_n_lines = "".join([l.lstrip(' \t') for l in lines[:n]])
dedented = dedent("".join(lines[n:]))
return first_n_lines + dedented
else:
return ""
def dedent_verbose(s, n=1):
new = dedent_docstr(s, n=n)
s_split = s.splitlines(keepends=True)
new_split = new.splitlines(keepends=True)
i, ind = 0, -1
for i in range(n, len(s_split)):
if s_split[i].strip():
ind = s_split[i].find(new_split[i])
break
if ind >= 0:
indent = s_split[i][:ind]
else:
indent = ""
return indent, new
def indent_docstr(s, indent, n=1, trim=True):
"""Add common indentation to all lines except first
Args:
s (str): docstring starting at indentation level 0
indent (str): text used for indentation, in practice
this will be the level of the declaration + 1
n (int): don't indent first n lines
trim (bool): trim whitespace (' \t') out of blank lines
Returns:
s with common indentation applied
"""
lines = s.splitlines(keepends=True)
for i in range(n, len(lines)):
if lines[i].strip() or not trim:
lines[i] = "{0}{1}".format(indent, lines[i])
else:
lines[i] = lines[i].strip(' \t')
return "".join(lines)
def count_leading_newlines(s):
"""count number of leading newlines
this includes newlines that are separated by other whitespace
"""
return s[:len(s) - len(s.lstrip())].count('\n')
def count_trailing_newlines(s):
"""count number of trailing newlines
this includes newlines that are separated by other whitespace
"""
return s[len(s.rstrip()):].count('\n')
def with_bounding_newlines(s, nleading=0, ntrailing=0, nl='\n'):
"""return s with at least # leading and # trailing newlines
this includes newlines that are separated by other whitespace
"""
return "{0}{1}{2}".format(nl * (nleading - count_leading_newlines(s)),
s,
nl * (ntrailing - count_trailing_newlines(s)))
def strip_newlines(s, nleading=0, ntrailing=0):
"""strip at most nleading and ntrailing newlines from s"""
for _ in range(nleading):
if s.lstrip(' \t')[:2] == '\r\n':
s = s.lstrip(' \t')[2:]
elif s.lstrip(' \t')[:1] == '\n':
s = s.lstrip(' \t')[1:]
for _ in range(ntrailing):
if s.rstrip(' \t')[-2:] == '\r\n':
s = s.rstrip(' \t')[:-2]
elif s.rstrip(' \t')[-1:] == '\n':
s = s.rstrip(' \t')[:-1]
return s
class Parameter(object):
""""""
names = None
types = None
description = None
tag = None
descr_only = None
meta = None
def __init__(self, names, types, description, tag=None, descr_only=False,
annotated=False, **kwargs):
"""
Args:
names (list): list of names
types (str): string describing data types
description (str): description text
tag (int): some meaningful index? not fleshed out yet
descr_only (bool): only description is useful
**kwargs: Description
"""
assert names is not None
if description is None:
description = ""
self.names = names
self.types = types
self.description = description
self.tag = tag
self.descr_only = descr_only
self.annotated = annotated
self.meta = kwargs
class Section(object):
""""""
ALIASES = {}
PARSERS = {}
is_formatted = None
args = None
args_parser = None
args_formatter = None
heading = None
alias = None
_text = None
section_indent = ""
indent = " "
meta = None
formatter_override = None
def __init__(self, heading, text="", indent=None, **kwargs):
"""
Args:
heading (str): heading of the section (should be title case)
text (str, optional): section text
indent (str, optional): used by some formatters
"""
self.heading = heading
self.alias = self.resolve_alias(heading)
if self.alias in self.PARSERS:
parser, formatter = self.PARSERS[self.alias]
self.args_parser = parser
self.args_formatter = formatter
self.is_formatted = True
else:
self.is_formatted = False
if indent is not None:
self.indent = indent
self.text = text
self.meta = kwargs
logger.debug("create section '{}' ({}) with args : '{}'".format(self.heading,
self.alias,
self.args))
@classmethod
def from_section(cls, sec):
new_sec = cls(sec.alias)
new_sec._text = sec._text # pylint: disable=protected-access
# when changing styles, the indentation should change to better fit
# the new style
# new_sec.section_indent = sec.section_indent
# new_sec.indent = sec.indent
if hasattr(sec, "args"):
new_sec.args = sec.args
return new_sec
@classmethod
def resolve_alias(cls, heading):
""""""
titled_heading = heading.title()
try:
return cls.ALIASES[titled_heading]
except KeyError:
return heading
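# Doctest-style sketch (using the NapoleonSection aliases defined below):
# headings are title-cased before lookup, and unknown headings pass through.
# >>> NapoleonSection.resolve_alias("args")
# 'Parameters'
# >>> NapoleonSection.resolve_alias("Notes")
# 'Notes'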
@property
def text(self):
""""""
if self.formatter_override is not None:
s = self.formatter_override(self) # pylint: disable=not-callable
elif self.args_formatter is not None:
s = self.args_formatter(self)
else:
s = self._text
return s
@text.setter
def text(self, val):
""""""
val = strip_newlines(val, ntrailing=1)
if self.args_parser is not None:
self.args = self.args_parser(self, val)
else:
section_indent, self._text = dedent_verbose(val, n=0)
# don't overwrite section indent if val isn't indented
if section_indent:
self.section_indent = section_indent
class NapoleonSection(Section):
""""""
ALIASES = {"Args": "Parameters",
"Arguments": "Parameters",
"Deleted Args": "Deleted Parameters",
"Deleted Arguments": "Deleted Parameters",
"Other Args": "Other Parameters",
"Other Arguments": "Other Parameters",
"Keyword Args": "Keyword Arguments",
"Return": "Returns",
"Yield": "Yields",
"No Longer Returns": "No Longer Returned",
"No Longer Yields": "No Longer Yielded",
"Warnings": "Warning"
}
def is_return_section(self):
return self.heading and self.heading.lower() in ('return', 'returns',
'yield', 'yields')
def param_parser_common(self, text):
# NOTE: there will be some tricky business if there is a
# section break done by "resuming unindented text"
param_list = []
param_dict = OrderedDict()
text = dedent_docstr(text, 0)
_r = r"^\S[^\r\n]*(?:\n[^\S\n]+\S[^\r\n]*|\n)*"
param_blocks = re.findall(_r, text, re.MULTILINE)
for i, block in enumerate(param_blocks):
param = self.finalize_param(block, len(param_list))
param_list.append(param)
if self.is_return_section():
param.names = [", ".join(param.names)]
param_dict[i] = param
else:
for name in param.names:
param_dict[name] = param
return param_dict
class GoogleSection(NapoleonSection):
""""""
section_indent = " "
indent = " "
@staticmethod
def finalize_param(s, tag):
"""
Args:
s (type): Description
tag (int): index of param? not fleshed out yet
"""
meta = {}
_r = r"([^,\s]+(?:\s*,\s*[^,\s]+)*\s*)(?:\((.*)\))?\s*:\s*(.*)"
m = re.match(_r, s, re.DOTALL | re.MULTILINE)
if m:
names, typ, descr = m.groups()
names = [n.strip() for n in names.split(',')]
meta['indent'], descr = dedent_verbose(descr, n=1)
descr_only = False
else:
names = ["{0}".format(tag)]
typ = ""
descr = s
descr_only = True
return Parameter(names, typ, descr, tag=tag, descr_only=descr_only, **meta)
def param_parser(self, text):
logger.info("[GoogleSection] section '{}' starts parsing".format(self.alias))
return self.param_parser_common(text)
def param_formatter(self):
""""""
logger.info("[GoogleSection] section '{}' starts formatting".format(self.alias))
s = ""
for param in self.args.values():
if param.descr_only:
s += with_bounding_newlines(param.description, ntrailing=1)
else:
if len(param.names) > 1:
logger.warn("section '{}' : Google docstrings don't allow > 1 "
"parameter per description".format(self.alias))
p = "{0}".format(", ".join(param.names))
if param.types:
types = param.types.strip()
if types:
p = "{0} ({1})".format(p, types)
if param.description:
desc = indent_docstr(param.description,
param.meta.get("indent", self.indent))
p = "{0}: {1}".format(p, desc)
s += with_bounding_newlines(p, ntrailing=1)
return s
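# Round-trip sketch: parsing a Google "Args" body and reading .text back
# should reproduce the block.
# >>> sec = GoogleSection("Args", "x (int): Description\n")
# >>> sec.text
# 'x (int): Description\n'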
PARSERS = {"Parameters": (param_parser,
param_formatter),
"Other Parameters": (param_parser,
param_formatter),
"Deleted Parameters": (param_parser,
param_formatter),
"Keyword Arguments": (param_parser,
param_formatter),
"Attributes": (param_parser,
param_formatter),
"Deleted Attributes": (param_parser,
param_formatter),
"Raises": (param_parser,
param_formatter),
"No Longer Raises": (param_parser,
param_formatter),
"Returns": (param_parser,
param_formatter),
"Yields": (param_parser,
param_formatter),
"No Longer Returned": (param_parser,
param_formatter),
"No Longer Yielded": (param_parser,
param_formatter),
}
class NumpySection(NapoleonSection):
""""""
indent = " "
@staticmethod
def finalize_param(s, i):
meta = {}
_r = r"\s*([^,\s]+(?:\s*,\s*[^,\s]+)*)\s*(?::\s*(.*?))?[^\S\n]*?\n(\s+.*)"
m = re.match(_r, s, re.DOTALL)
if m:
names, typ, desc = m.groups()
# FIXME hack, name for numpy parameters is always a list of names
# to support the multiple parameters per description option in
# numpy docstrings
names = [n.strip() for n in names.split(',')]
meta['indent'], descr = dedent_verbose(desc, 0)
descr_only = False
else:
names = ["{0}".format(i)]
typ = ""
descr = s
descr_only = True
return Parameter(names, typ, descr, tag=i, descr_only=descr_only, **meta)
def param_parser(self, text):
logger.info("[NumpySection] section '{}' starts parsing".format(self.alias))
return self.param_parser_common(text)
def param_formatter(self):
""""""
# NOTE: there will be some tricky business if there is a
# section break done by "resuming unindented text"
logger.info("[NumpySection] section '{}' starts formatting".format(self.alias))
s = ""
# already_seen = {}
for param in self.args.values():
if param.descr_only:
s += with_bounding_newlines(param.description, ntrailing=1)
else:
p = "{0}".format(", ".join(param.names))
if param.types:
types = param.types.strip()
if types:
p = "{0} : {1}".format(p, param.types.strip())
p = with_bounding_newlines(p, ntrailing=1)
if param.description:
p += indent_docstr(param.description,
param.meta.get("indent", self.indent),
n=0)
s += with_bounding_newlines(p, ntrailing=1)
return s
PARSERS = {"Parameters": (param_parser,
param_formatter),
"Other Parameters": (param_parser,
param_formatter),
"Deleted Parameters": (param_parser,
param_formatter),
"Keyword Arguments": (param_parser,
param_formatter),
"Attributes": (param_parser,
param_formatter),
"Deleted Attributes": (param_parser,
param_formatter),
"Raises": (param_parser,
param_formatter),
"No Longer Raises": (param_parser,
param_formatter),
"Returns": (param_parser,
param_formatter),
"Yields": (param_parser,
param_formatter),
"No Longer Returned": (param_parser,
param_formatter),
"No Longer Yielded": (param_parser,
param_formatter),
}
class Docstring(object):
"""Handle parsing / modifying / writing docstrings"""
STYLE_NAME = "none"
SECTION_STYLE = Section
TEMPLATE = OrderedDict([("Summary", None)])
PREFERRED_PARAMS_ALIAS = "Args"
sections = None
trailing_newlines = None
def __init__(self, docstr, template_order=False):
"""
Parameters:
docstr (Docstring or str): some existing docstring
template_order (bool, optional): iff True, reorder the
sections to match the order they appear in the template
"""
if isinstance(docstr, Docstring):
self.sections = docstr.sections
self.trailing_newlines = docstr.trailing_newlines
if not isinstance(docstr, type(self)):
# fixme, this is kinda hacky
make_new_sec = self.SECTION_STYLE.from_section
for sec_name, sec in list(docstr.sections.items()):
# when the section should not exist
# i.e. when a section was generated, but isn't needed anymore
# e.g. when there isn't any exception raised
if sec:
docstr.sections[sec_name] = make_new_sec(sec)
else:
# deleting section that shouldn't be here
# including those generated with template_order=True
del docstr.sections[sec_name]
# ok, this way of changing indentation is a thunder hack
if "Parameters" in docstr.sections:
self.get_section("Parameters").heading = self.PREFERRED_PARAMS_ALIAS
for arg in self.get_section("Parameters").args.values():
arg.meta['indent'] = self.get_section("Parameters").indent
if "Returns" in docstr.sections:
for arg in self.get_section("Returns").args.values():
arg.meta['indent'] = self.get_section("Returns").indent
if "Yields" in docstr.sections:
for arg in self.get_section("Yields").args.values():
arg.meta['indent'] = self.get_section("Yields").indent
elif isinstance(docstr, string_types):
if template_order:
self.sections = self.TEMPLATE.copy()
else:
self.sections = OrderedDict()
self._parse(docstr)
def _parse(self, s):
"""Parse docstring into meta data
Parameters:
s (str): docstring
"""
raise NotImplementedError("_parse is an abstract method")
def format(self, top_indent):
"""Format docstring into a string
Parameters:
top_indent (str): indentation added to all but the first
line
Returns:
str: properly formatted
"""
raise NotImplementedError("format is an abstract method")
def update_parameters(self, params):
""""""
raise NotImplementedError("update_parameters is an abstract method")
def update_return_type(self, ret_name, ret_type,
default_description="Description",
keyword="return"):
""""""
raise NotImplementedError("update_return_type is an abstract method")
def update_attributes(self, attribs, alpha_order=True):
"""
Args:
attribs (OrderedDict): Parameter objects keyed by their names
"""
raise NotImplementedError("update_attributes is an abstract method")
def update_exceptions(self, attribs, alpha_order=True):
"""
Args:
attribs (OrderedDict): Parameter objects keyed by their names
"""
raise NotImplementedError("update_exceptions is an abstract method")
def add_dummy_returns(self, name, typ, description):
raise NotImplementedError("add_dummy_returns is an abstract method")
def finalize_section(self, heading, text):
"""
Args:
heading (type): Description
text (type): Description
"""
section = self.SECTION_STYLE(heading, text)
self.sections[section.alias] = section
def get_section(self, section_name):
if section_name in self.sections:
return self.sections[section_name]
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
return self.sections[alias]
raise KeyError("Section '{0}' not found".format(section_name))
def pop_section(self, section_name):
if section_name in self.sections:
return self.sections.pop(section_name)
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
return self.sections.pop(alias)
raise KeyError("Section '{0}' not found".format(section_name))
def insert_section(self, section_name, section):
if section.heading != section_name:
section.heading = section_name
self.sections[section_name] = section
def section_exists(self, section_name):
"""returns True iff section exists, and was finalized"""
sec = None
if section_name in self.sections:
sec = self.sections[section_name]
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
sec = self.sections[alias]
if sec is not None:
return True
return False
class NapoleonDocstring(Docstring): # pylint: disable=abstract-method
"""Styles understood by napoleon, aka. Google/Numpy"""
STYLE_NAME = "napoleon"
TEMPLATE = OrderedDict([("Summary", None),
("Parameters", None),
("Keyword Arguments", None),
("Returns", None),
("Yields", None),
("No Longer Returned", None),
("No Longer Yielded", None),
("Other Parameters", None),
("Deleted Parameters", None),
("Attributes", None),
("Deleted Attributes", None),
("Methods", None),
("Raises", None),
("No Longer Raises", None),
("Warns", None),
("See Also", None),
("Warning", None),
("Note", None),
("Notes", None),
("References", None),
("Example", None),
("Examples", None),
])
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip()
def _parse(self, s):
"""
Args:
s (type): Description
"""
logger.info("[NapoleonDocstring] starts parsing text")
self.trailing_newlines = count_trailing_newlines(s)
s = dedent_docstr(s)
sec_starts = [(m.start(), m.end(), m.string[m.start():m.end()])
for m in re.finditer(self.SECTION_RE, s, re.MULTILINE)]
sec_starts.insert(0, (0, 0, "Summary"))
sec_starts.append((len(s), len(s), ""))
for current_sec, next_sec in zip(sec_starts[:-1], sec_starts[1:]):
sec_name = self._extract_section_name(current_sec[2])
sec_body = s[current_sec[1]:next_sec[0]]
self.finalize_section(sec_name, sec_body)
@staticmethod
def _format_section_text(heading, body):
raise NotImplementedError("This is an abstract method")
def format(self, top_indent):
"""
Args:
top_indent (type): Description
"""
logger.info("[NapoleonDocstring] starts formatting")
s = ""
if self.section_exists("Summary"):
sec_text = self.get_section("Summary").text
if sec_text.strip():
s += with_bounding_newlines(sec_text, nleading=0, ntrailing=1)
for _, section in islice(self.sections.items(), 1, None):
if section is None:
continue
sec_body = indent_docstr(section.text, section.section_indent, n=0)
sec_text = self._format_section_text(section.heading, sec_body)
s += with_bounding_newlines(sec_text, nleading=1, ntrailing=1)
if self.trailing_newlines:
s = with_bounding_newlines(s, ntrailing=self.trailing_newlines)
s = indent_docstr(s, top_indent)
return s
def _update_section(self, params, sec_name, sec_alias=None,
del_prefix="Deleted ", alpha_order=False,
other_sections=()):
"""Update section to add / remove params
As a failsafe, params that are removed are placed in a
"Deleted ..." section
Args:
params (OrderedDict): dict of Parameter objects
sec_name (str): generic section name
sec_alias (str): section name that appears in the docstring
del_prefix (str): prefix for section that holds params that
no longer exist.
alpha_order (bool): whether or not to alphabetically sort
the params
"""
if not sec_alias:
sec_alias = sec_name
if not self.section_exists(sec_name) and len(params) == 0:
return None
elif not self.section_exists(sec_name):
self.finalize_section(sec_alias, "")
# put together which other sections exist so we can use them to
# exclude params that exist in them
_other = []
for _secname in other_sections:
if self.section_exists(_secname):
_other.append(self.get_section(_secname))
other_sections = _other
if alpha_order:
sorted_params = OrderedDict()
for k in sorted(list(params.keys()), key=str.lower):
sorted_params[k] = params[k]
params = sorted_params
current_dict = self.get_section(sec_name).args
# go through params in the order of the function declaration
# and cherry-pick from current_dict if there's already a description
# for that parameter
tags_seen = dict()
new = OrderedDict()
for name, param in params.items():
if name in current_dict:
def_param = param
param = current_dict.pop(name)
if param.tag in tags_seen:
param = None
else:
tags_seen[param.tag] = True
# update the type if annotated
if def_param.annotated:
param.types = def_param.types
else:
# if param is in one of the 'other sections', then don't
# worry about it
for sec in other_sections:
if name in sec.args:
# update the type if annotated
if param.annotated:
sec.args[name].types = param.types
# now ignore it
param = None
if param:
new[name] = param
# add description only parameters back in
for key, param in list(current_dict.items()):
if param.descr_only:
# param.description = '\n' + param.description
new[key] = current_dict.pop(key)
# not sure when this guy gets created
if '' in current_dict:
del current_dict['']
# go through params that are no longer in the arguments list and
# move them from the Parameters section of the docstring to the
# deleted parameters section
if len(current_dict):
del_sec_name = del_prefix + sec_name
del_sec_alias = del_prefix + sec_alias
logger.warn("killing parameters named: {}".format(current_dict.keys()))
# TODO: put a switch here for other bahavior?
if not self.section_exists(self.SECTION_STYLE.resolve_alias(del_sec_name)):
self.finalize_section(del_sec_name, "")
deled_params = self.get_section(del_sec_name)
deleted_tags = dict()
for key, val in current_dict.items():
if key in deled_params.args:
logger.warn("Stronger Warning: Killing old deleted param: "
"'{0}'".format(key))
val.names.remove(key)
if val.tag in deleted_tags:
deleted_tags[val.tag].names.append(key)
else:
new_val = Parameter([key], val.types, val.description)
deleted_tags[val.tag] = new_val
deled_params.args[key] = new_val
if len(new) == 0:
self.sections[sec_name] = None
else:
self.sections[sec_name].args = new
def update_parameters(self, params):
"""
Args:
params (OrderedDict): Parameter objects keyed by their names
"""
logger.info("[NapoleonDocstring] update parameters")
other_sections = ['Other Parameters', 'Keyword Arguments']
self._update_section(params, "Parameters", self.PREFERRED_PARAMS_ALIAS,
other_sections=other_sections)
def update_return_type(self, ret_name, ret_type,
default_description="Description",
keyword="return", del_prefix="No Longer "):
""""""
logger.info("[NapoleonDocstring] update return type")
if keyword == "yield":
sec_name = "Yields"
elif keyword == "return":
sec_name = "Returns"
else:
logger.debug("Unknown return keyword: '{}'".format(keyword))
for std_ret_name in ("Yields", "Returns"):
if self.section_exists(std_ret_name):
del_sec_name = del_prefix + std_ret_name
del_sec_alias = self.SECTION_STYLE.resolve_alias(del_sec_name)
if not self.section_exists(del_sec_alias):
self.finalize_section(del_sec_alias, "")
del_sec = self.get_section(del_sec_alias)
sec = self.pop_section(std_ret_name)
del_sec.args = sec.args
return
if not self.section_exists(sec_name):
# see if a section exists from another keyword, ie, maybe
# this function used to return, but now it yields
for std_ret_name in ("Yields", "Returns"):
if self.section_exists(std_ret_name):
# necessary to completely recreate the section
# in order to use the right parser and formatter
logger.debug("old return section exists : '{}'".format(std_ret_name))
old_sec = self.pop_section(std_ret_name)
self.finalize_section(sec_name, "")
new_sec = self.get_section(sec_name)
new_sec.args = old_sec.args
self.insert_section(sec_name, new_sec)
break
if self.section_exists(sec_name):
sec = self.get_section(sec_name)
if sec.args and ret_type:
p0 = next(iter(sec.args.values()))
if p0.descr_only:
p0.description = ret_type
elif p0.types:
p0.types = ret_type
elif p0.names:
p0.names = [ret_type]
elif ret_name or ret_type:
description = default_description
sec.args = OrderedDict()
if ret_name:
sec.args[ret_name] = Parameter([ret_name], ret_type, description)
else:
sec.args[ret_type] = Parameter([ret_type], "", description)
else:
# and i ask myself, how did i get here?
pass
else:
self.finalize_section(sec_name, "")
sec = self.get_section(sec_name)
ret_type = ret_type if ret_type != "" else "${NUMBER:TYPE}"
sec.args = OrderedDict()
sec.args[ret_type] = Parameter([ret_type], "", default_description)
def update_attributes(self, attribs, alpha_order=True):
"""
Args:
attribs (OrderedDict): Parameter objects keyed by their names
"""
logger.info("[NapoleonDocstring] update attributes")
self._update_section(attribs, "Attributes", alpha_order=alpha_order)
def update_exceptions(self, attribs, alpha_order=True):
"""
Args:
attribs (OrderedDict): Parameter objects keyed by their names
"""
logger.info("[NapoleonDocstring] update exceptions")
self._update_section(attribs, "Raises", del_prefix="No Longer ",
alpha_order=alpha_order)
def add_dummy_returns(self, name, typ, description):
# No longer used??
if not self.section_exists("Returns"):
sec = self.SECTION_STYLE("Returns")
if name:
sec.args = {name: Parameter([name], typ, description)}
else:
sec.args = {typ: Parameter([typ], "", description)}
self.sections["Returns"] = sec
class GoogleDocstring(NapoleonDocstring):
""""""
STYLE_NAME = "google"
SECTION_STYLE = GoogleSection
SECTION_RE = r"^[A-Za-z0-9][A-Za-z0-9 \t]*:\s*$\r?\n?"
PREFERRED_PARAMS_ALIAS = "Args"
@classmethod
def detect_style(cls, docstr):
""""""
m = re.search(cls.SECTION_RE, docstr, re.MULTILINE)
return m is not None
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip().rstrip(':').rstrip()
@staticmethod
def _format_section_text(heading, body):
return "{0}:\n{1}".format(heading, body)
class NumpyDocstring(NapoleonDocstring):
""""""
STYLE_NAME = "numpy"
SECTION_STYLE = NumpySection
SECTION_RE = r"^([A-Za-z0-9][A-Za-z0-9 \t]*)\s*\n-+\s*?$\r?\n?"
PREFERRED_PARAMS_ALIAS = "Parameters"
@classmethod
def detect_style(cls, docstr):
""""""
m = re.search(cls.SECTION_RE, docstr, re.MULTILINE)
return m is not None
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip().rstrip('-').rstrip()
@staticmethod
def _format_section_text(heading, body):
return "{0}\n{1}\n{2}".format(heading, "-" * len(heading), body)
STYLE_LOOKUP = OrderedDict([('numpy', NumpyDocstring),
('google', GoogleDocstring)])
##
## EOF
##
|
finalize_param
|
Args:
s (type): Description
tag (int): index of param? not fleshed out yet
|
# -*- coding: utf-8 -*-
"""Docstring Parsers/Formatters"""
# TODO: break this module up into smaller pieces
import sys
import re
from textwrap import dedent
from collections import OrderedDict
from itertools import islice
from .autodocstring_logging import logger
PY3k = sys.version_info[0] == 3
if PY3k:
string_types = str,
else:
string_types = basestring, # pylint: disable=undefined-variable
def make_docstring_obj(docstr, default="google", template_order=False):
"""Detect docstring style and create a Docstring object
Parameters:
docstr (str): source docstring
default (str, class): 'google', 'numpy' or subclass
of Docstring
template_order (bool, optional): iff True, reorder the
sections to match the order they appear in the template
Returns:
subclass of Docstring
"""
typ = detect_style(docstr)
default_name = default if isinstance(default, string_types) else default.__name__
logger.info("[make_docstring_obj] from {} to {}"
"".format(typ.__name__ if typ is not None else None, default_name))
if typ is None:
if issubclass(default, Docstring):
typ = default
else:
typ = STYLE_LOOKUP[default.lower()]
return typ(docstr, template_order=template_order)
def detect_style(docstr):
"""Detect docstr style from existing docstring
Parameters:
docstr (str): docstring whose style we want to know
Returns:
class: one of [GoogleDocstring, NumpyDocstring, None]; None
means no match
"""
docstr = dedent_docstr(docstr)
for c in STYLE_LOOKUP.values():
if c.detect_style(docstr):
return c
return None
def dedent_docstr(s, n=1):
"""Dedent all lines except first n lines
Args:
s (type): some text to dedent
n (int): number of lines to skip, (n == 0 is a normal dedent,
n == 1 is useful for whole docstrings)
"""
lines = s.splitlines(keepends=True)
if lines:
first_n_lines = "".join([l.lstrip(' \t') for l in lines[:n]])
dedented = dedent("".join(lines[n:]))
return first_n_lines + dedented
else:
return ""
def dedent_verbose(s, n=1):
new = dedent_docstr(s, n=n)
s_split = s.splitlines(keepends=True)
new_split = new.splitlines(keepends=True)
i, ind = 0, -1
for i in range(n, len(s_split)):
if s_split[i].strip():
ind = s_split[i].find(new_split[i])
break
if ind >= 0:
indent = s_split[i][:ind]
else:
indent = ""
return indent, new
def indent_docstr(s, indent, n=1, trim=True):
"""Add common indentation to all lines except first
Args:
s (str): docstring starting at indentation level 0
indent (str): text used for indentation, in practice
this will be the level of the declaration + 1
n (int): don't indent first n lines
trim (bool): trim whitespace (' \t') out of blank lines
Returns:
s with common indentation applied
"""
lines = s.splitlines(keepends=True)
for i in range(n, len(lines)):
if lines[i].strip() or not trim:
lines[i] = "{0}{1}".format(indent, lines[i])
else:
lines[i] = lines[i].strip(' \t')
return "".join(lines)
def count_leading_newlines(s):
"""count number of leading newlines
this includes newlines that are separated by other whitespace
"""
return s[:len(s) - len(s.lstrip())].count('\n')
def count_trailing_newlines(s):
"""count number of trailing newlines
this includes newlines that are separated by other whitespace
"""
return s[len(s.rstrip()):].count('\n')
def with_bounding_newlines(s, nleading=0, ntrailing=0, nl='\n'):
"""return s with at least # leading and # trailing newlines
this includes newlines that are separated by other whitespace
"""
return "{0}{1}{2}".format(nl * (nleading - count_leading_newlines(s)),
s,
nl * (ntrailing - count_trailing_newlines(s)))
def strip_newlines(s, nleading=0, ntrailing=0):
"""strip at most nleading and ntrailing newlines from s"""
for _ in range(nleading):
if s.lstrip(' \t')[:2] == '\r\n':
s = s.lstrip(' \t')[2:]
elif s.lstrip(' \t')[:1] == '\n':
s = s.lstrip(' \t')[1:]
for _ in range(ntrailing):
if s.rstrip(' \t')[-2:] == '\r\n':
s = s.rstrip(' \t')[:-2]
elif s.rstrip(' \t')[-1:] == '\n':
s = s.rstrip(' \t')[:-1]
return s
class Parameter(object):
""""""
names = None
types = None
description = None
tag = None
descr_only = None
meta = None
def __init__(self, names, types, description, tag=None, descr_only=False,
annotated=False, **kwargs):
"""
Args:
names (list): list of names
types (str): string describing data types
description (str): description text
tag (int): some meaningful index? not fleshed out yet
descr_only (bool): only description is useful
**kwargs: Description
"""
assert names is not None
if description is None:
description = ""
self.names = names
self.types = types
self.description = description
self.tag = tag
self.descr_only = descr_only
self.annotated = annotated
self.meta = kwargs
class Section(object):
""""""
ALIASES = {}
PARSERS = {}
is_formatted = None
args = None
args_parser = None
args_formatter = None
heading = None
alias = None
_text = None
section_indent = ""
indent = " "
meta = None
formatter_override = None
def __init__(self, heading, text="", indent=None, **kwargs):
"""
Args:
heading (str): heading of the section (should be title case)
text (str, optional): section text
indent (str, optional): used by some formatters
"""
self.heading = heading
self.alias = self.resolve_alias(heading)
if self.alias in self.PARSERS:
parser, formatter = self.PARSERS[self.alias]
self.args_parser = parser
self.args_formatter = formatter
self.is_formatted = True
else:
self.is_formatted = False
if indent is not None:
self.indent = indent
self.text = text
self.meta = kwargs
logger.debug("create section '{}' ({}) with args : '{}'".format(self.heading,
self.alias,
self.args))
@classmethod
def from_section(cls, sec):
new_sec = cls(sec.alias)
new_sec._text = sec._text # pylint: disable=protected-access
# when changing styles, the indentation should change to better fit
# the new style
# new_sec.section_indent = sec.section_indent
# new_sec.indent = sec.indent
if hasattr(sec, "args"):
new_sec.args = sec.args
return new_sec
@classmethod
def resolve_alias(cls, heading):
""""""
titled_heading = heading.title()
try:
return cls.ALIASES[titled_heading]
except KeyError:
return heading
@property
def text(self):
""""""
if self.formatter_override is not None:
s = self.formatter_override(self) # pylint: disable=not-callable
elif self.args_formatter is not None:
s = self.args_formatter(self)
else:
s = self._text
return s
@text.setter
def text(self, val):
""""""
val = strip_newlines(val, ntrailing=1)
if self.args_parser is not None:
self.args = self.args_parser(self, val)
else:
section_indent, self._text = dedent_verbose(val, n=0)
# don't overwrite section indent if val isn't indented
if section_indent:
self.section_indent = section_indent
class NapoleonSection(Section):
""""""
ALIASES = {"Args": "Parameters",
"Arguments": "Parameters",
"Deleted Args": "Deleted Parameters",
"Deleted Arguments": "Deleted Parameters",
"Other Args": "Other Parameters",
"Other Arguments": "Other Parameters",
"Keyword Args": "Keyword Arguments",
"Return": "Returns",
"Yield": "Yields",
"No Longer Returns": "No Longer Returned",
"No Longer Yields": "No Longer Yielded",
"Warnings": "Warning"
}
def is_return_section(self):
return self.heading and self.heading.lower() in ('return', 'returns',
'yield', 'yields')
def param_parser_common(self, text):
# NOTE: there will be some tricky business if there is a
# section break done by "resuming unindented text"
param_list = []
param_dict = OrderedDict()
text = dedent_docstr(text, 0)
_r = r"^\S[^\r\n]*(?:\n[^\S\n]+\S[^\r\n]*|\n)*"
param_blocks = re.findall(_r, text, re.MULTILINE)
for i, block in enumerate(param_blocks):
param = self.finalize_param(block, len(param_list))
param_list.append(param)
if self.is_return_section():
param.names = [", ".join(param.names)]
param_dict[i] = param
else:
for name in param.names:
param_dict[name] = param
return param_dict
class GoogleSection(NapoleonSection):
""""""
section_indent = " "
indent = " "
# MASKED: finalize_param function (lines 327-347)
def param_parser(self, text):
logger.info("[GoogleSection] section '{}' starts parsing".format(self.alias))
return self.param_parser_common(text)
def param_formatter(self):
""""""
logger.info("[GoogleSection] section '{}' starts formatting".format(self.alias))
s = ""
for param in self.args.values():
if param.descr_only:
s += with_bounding_newlines(param.description, ntrailing=1)
else:
if len(param.names) > 1:
logger.warn("section '{}' : Google docstrings don't allow > 1 "
"parameter per description".format(self.alias))
p = "{0}".format(", ".join(param.names))
if param.types:
types = param.types.strip()
if types:
p = "{0} ({1})".format(p, types)
if param.description:
desc = indent_docstr(param.description,
param.meta.get("indent", self.indent))
p = "{0}: {1}".format(p, desc)
s += with_bounding_newlines(p, ntrailing=1)
return s
PARSERS = {"Parameters": (param_parser,
param_formatter),
"Other Parameters": (param_parser,
param_formatter),
"Deleted Parameters": (param_parser,
param_formatter),
"Keyword Arguments": (param_parser,
param_formatter),
"Attributes": (param_parser,
param_formatter),
"Deleted Attributes": (param_parser,
param_formatter),
"Raises": (param_parser,
param_formatter),
"No Longer Raises": (param_parser,
param_formatter),
"Returns": (param_parser,
param_formatter),
"Yields": (param_parser,
param_formatter),
"No Longer Returned": (param_parser,
param_formatter),
"No Longer Yielded": (param_parser,
param_formatter),
}
class NumpySection(NapoleonSection):
""""""
indent = " "
@staticmethod
def finalize_param(s, i):
meta = {}
_r = r"\s*([^,\s]+(?:\s*,\s*[^,\s]+)*)\s*(?::\s*(.*?))?[^\S\n]*?\n(\s+.*)"
m = re.match(_r, s, re.DOTALL)
if m:
names, typ, desc = m.groups()
# FIXME hack, name for numpy parameters is always a list of names
# to support the multiple parameters per description option in
# numpy docstrings
names = [n.strip() for n in names.split(',')]
meta['indent'], descr = dedent_verbose(desc, 0)
descr_only = False
else:
names = ["{0}".format(i)]
typ = ""
descr = s
descr_only = True
return Parameter(names, typ, descr, tag=i, descr_only=descr_only, **meta)
def param_parser(self, text):
logger.info("[NumpySection] section '{}' starts parsing".format(self.alias))
return self.param_parser_common(text)
def param_formatter(self):
""""""
# NOTE: there will be some tricky business if there is a
# section break done by "resuming unindented text"
logger.info("[NumpySection] section '{}' starts formatting".format(self.alias))
s = ""
# already_seen = {}
for param in self.args.values():
if param.descr_only:
s += with_bounding_newlines(param.description, ntrailing=1)
else:
p = "{0}".format(", ".join(param.names))
if param.types:
types = param.types.strip()
if types:
p = "{0} : {1}".format(p, param.types.strip())
p = with_bounding_newlines(p, ntrailing=1)
if param.description:
p += indent_docstr(param.description,
param.meta.get("indent", self.indent),
n=0)
s += with_bounding_newlines(p, ntrailing=1)
return s
PARSERS = {"Parameters": (param_parser,
param_formatter),
"Other Parameters": (param_parser,
param_formatter),
"Deleted Parameters": (param_parser,
param_formatter),
"Keyword Arguments": (param_parser,
param_formatter),
"Attributes": (param_parser,
param_formatter),
"Deleted Attributes": (param_parser,
param_formatter),
"Raises": (param_parser,
param_formatter),
"No Longer Raises": (param_parser,
param_formatter),
"Returns": (param_parser,
param_formatter),
"Yields": (param_parser,
param_formatter),
"No Longer Returned": (param_parser,
param_formatter),
"No Longer Yielded": (param_parser,
param_formatter),
}
class Docstring(object):
"""Handle parsing / modifying / writing docstrings"""
STYLE_NAME = "none"
SECTION_STYLE = Section
TEMPLATE = OrderedDict([("Summary", None)])
PREFERRED_PARAMS_ALIAS = "Args"
sections = None
trailing_newlines = None
def __init__(self, docstr, template_order=False):
"""
Parameters:
docstr (Docstring or str): some existing docstring
template_order (bool, optional): iff True, reorder the
sections to match the order they appear in the template
"""
if isinstance(docstr, Docstring):
self.sections = docstr.sections
self.trailing_newlines = docstr.trailing_newlines
if not isinstance(docstr, type(self)):
# fixme, this is kinda hacky
make_new_sec = self.SECTION_STYLE.from_section
for sec_name, sec in docstr.sections.items():
                    # when the section should not exist
# i.e. when a section was generated, but isn't needed anymore
# e.g. when there isn't any exception raised
if sec:
docstr.sections[sec_name] = make_new_sec(sec)
else:
# deleting section that shouldn't be here
# including those generated with template_order=True
del docstr.sections[sec_name]
# ok, this way of changing indentation is a thunder hack
if "Parameters" in docstr.sections:
self.get_section("Parameters").heading = self.PREFERRED_PARAMS_ALIAS
for arg in self.get_section("Parameters").args.values():
arg.meta['indent'] = self.get_section("Parameters").indent
if "Returns" in docstr.sections:
for arg in self.get_section("Returns").args.values():
arg.meta['indent'] = self.get_section("Returns").indent
if "Yields" in docstr.sections:
for arg in self.get_section("Yields").args.values():
arg.meta['indent'] = self.get_section("Yields").indent
elif isinstance(docstr, string_types):
if template_order:
self.sections = self.TEMPLATE.copy()
else:
self.sections = OrderedDict()
self._parse(docstr)
def _parse(self, s):
"""Parse docstring into meta data
Parameters:
s (str): docstring
"""
raise NotImplementedError("_parse is an abstract method")
def format(self, top_indent):
"""Format docstring into a string
Parameters:
top_indent (str): indentation added to all but the first
lines
Returns:
str: properly formatted
"""
raise NotImplementedError("format is an abstract method")
def update_parameters(self, params):
""""""
raise NotImplementedError("update_parameters is an abstract method")
def update_return_type(self, ret_name, ret_type,
default_description="Description",
keyword="return"):
""""""
raise NotImplementedError("update_return_type is an abstract method")
def update_attributes(self, attribs, alpha_order=True):
"""
Args:
params (OrderedDict): params objects keyed by their names
"""
raise NotImplementedError("update_attributes is an abstract method")
def update_exceptions(self, attribs, alpha_order=True):
"""
Args:
params (OrderedDict): params objects keyed by their names
"""
raise NotImplementedError("update_exceptions is an abstract method")
def add_dummy_returns(self, name, typ, description):
raise NotImplementedError("add_dummy_returns is an abstract method")
def finalize_section(self, heading, text):
"""
Args:
heading (type): Description
text (type): Description
"""
section = self.SECTION_STYLE(heading, text)
self.sections[section.alias] = section
def get_section(self, section_name):
if section_name in self.sections:
return self.sections[section_name]
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
return self.sections[alias]
raise KeyError("Section '{0}' not found".format(section_name))
def pop_section(self, section_name):
if section_name in self.sections:
return self.sections.pop(section_name)
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
return self.sections.pop(alias)
raise KeyError("Section '{0}' not found".format(section_name))
def insert_section(self, section_name, section):
if section.heading != section_name:
section.heading = section_name
self.sections[section_name] = section
def section_exists(self, section_name):
"""returns True iff section exists, and was finalized"""
sec = None
if section_name in self.sections:
sec = self.sections[section_name]
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
sec = self.sections[alias]
if sec is not None:
return True
return False
class NapoleonDocstring(Docstring): # pylint: disable=abstract-method
"""Styles understood by napoleon, aka. Google/Numpy"""
STYLE_NAME = "napoleon"
TEMPLATE = OrderedDict([("Summary", None),
("Parameters", None),
("Keyword Arguments", None),
("Returns", None),
("Yields", None),
("No Longer Returned", None),
("No Longer Yielded", None),
("Other Parameters", None),
("Deleted Parameters", None),
("Attributes", None),
("Deleted Attributes", None),
("Methods", None),
("Raises", None),
("No Longer Raises", None),
("Warns", None),
("See Also", None),
("Warning", None),
("Note", None),
("Notes", None),
("References", None),
("Example", None),
("Examples", None),
])
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip()
def _parse(self, s):
"""
Args:
s (type): Description
"""
logger.info("[NapoleonDocstring] starts parsing text")
self.trailing_newlines = count_trailing_newlines(s)
s = dedent_docstr(s)
sec_starts = [(m.start(), m.end(), m.string[m.start():m.end()])
for m in re.finditer(self.SECTION_RE, s, re.MULTILINE)]
sec_starts.insert(0, (0, 0, "Summary"))
sec_starts.append((len(s), len(s), ""))
for current_sec, next_sec in zip(sec_starts[:-1], sec_starts[1:]):
sec_name = self._extract_section_name(current_sec[2])
sec_body = s[current_sec[1]:next_sec[0]]
self.finalize_section(sec_name, sec_body)
@staticmethod
def _format_section_text(heading, body):
raise NotImplementedError("This is an abstract method")
def format(self, top_indent):
"""
Args:
top_indent (type): Description
"""
logger.info("[NapoleonDocstring] starts formatting")
s = ""
if self.section_exists("Summary"):
sec_text = self.get_section("Summary").text
if sec_text.strip():
s += with_bounding_newlines(sec_text, nleading=0, ntrailing=1)
for _, section in islice(self.sections.items(), 1, None):
if section is None:
continue
sec_body = indent_docstr(section.text, section.section_indent, n=0)
sec_text = self._format_section_text(section.heading, sec_body)
s += with_bounding_newlines(sec_text, nleading=1, ntrailing=1)
if self.trailing_newlines:
s = with_bounding_newlines(s, ntrailing=self.trailing_newlines)
s = indent_docstr(s, top_indent)
return s
def _update_section(self, params, sec_name, sec_alias=None,
del_prefix="Deleted ", alpha_order=False,
other_sections=()):
"""Update section to add / remove params
As a failsafe, params that are removed are placed in a
"Deleted ..." section
Args:
params (OrderedDict): dict of Parameter objects
sec_name (str): generic section name
            sec_alias (str): section name that appears in the docstring
del_prefix (str): prefix for section that holds params that
no longer exist.
alpha_order (bool): whether or not to alphabetically sort
the params
"""
if not sec_alias:
sec_alias = sec_name
if not self.section_exists(sec_name) and len(params) == 0:
return None
elif not self.section_exists(sec_name):
self.finalize_section(sec_alias, "")
# put together which other sections exist so we can use them to
# exclude params that exist in them
_other = []
for _secname in other_sections:
if self.section_exists(_secname):
_other.append(self.get_section(_secname))
other_sections = _other
if alpha_order:
sorted_params = OrderedDict()
for k in sorted(list(params.keys()), key=str.lower):
sorted_params[k] = params[k]
params = sorted_params
current_dict = self.get_section(sec_name).args
# go through params in the order of the function declaration
# and cherry-pick from current_dict if there's already a description
# for that parameter
tags_seen = dict()
new = OrderedDict()
for name, param in params.items():
if name in current_dict:
def_param = param
param = current_dict.pop(name)
if param.tag in tags_seen:
param = None
else:
tags_seen[param.tag] = True
# update the type if annotated
if def_param.annotated:
param.types = def_param.types
else:
# if param is in one of the 'other sections', then don't
# worry about it
for sec in other_sections:
if name in sec.args:
                        # update the type if annotated
if param.annotated:
sec.args[name].types = param.types
# now ignore it
param = None
if param:
new[name] = param
# add description only parameters back in
        # iterate over a copy: popping items from current_dict while
        # iterating would raise RuntimeError under Python 3
        for key, param in list(current_dict.items()):
if param.descr_only:
# param.description = '\n' + param.description
new[key] = current_dict.pop(key)
# not sure when this guy gets created
if '' in current_dict:
del current_dict['']
        # go through params that are no longer in the arguments list and
# move them from the Parameters section of the docstring to the
# deleted parameters section
if len(current_dict):
del_sec_name = del_prefix + sec_name
del_sec_alias = del_prefix + sec_alias
logger.warn("killing parameters named: {}".format(current_dict.keys()))
            # TODO: put a switch here for other behavior?
if not self.section_exists(self.SECTION_STYLE.resolve_alias(del_sec_name)):
self.finalize_section(del_sec_name, "")
deled_params = self.get_section(del_sec_name)
deleted_tags = dict()
for key, val in current_dict.items():
if key in deled_params.args:
logger.warn("Stronger Warning: Killing old deleted param: "
"'{0}'".format(key))
val.names.remove(key)
if val.tag in deleted_tags:
deleted_tags[val.tag].names.append(key)
else:
new_val = Parameter([key], val.types, val.description)
deleted_tags[val.tag] = new_val
deled_params.args[key] = new_val
if len(new) == 0:
self.sections[sec_name] = None
else:
self.sections[sec_name].args = new
def update_parameters(self, params):
"""
Args:
params (OrderedDict): params objects keyed by their names
"""
logger.info("[NapoleonDocstring] update parameters")
        # "Keyword Arguments" is the canonical alias used by this module
        other_sections = ['Other Parameters', 'Keyword Arguments']
self._update_section(params, "Parameters", self.PREFERRED_PARAMS_ALIAS,
other_sections=other_sections)
def update_return_type(self, ret_name, ret_type,
default_description="Description",
keyword="return", del_prefix="No Longer "):
""""""
logger.info("[NapoleonDocstring] update return type")
if keyword == "yield":
sec_name = "Yields"
elif keyword == "return":
sec_name = "Returns"
else:
logger.debug("Unknown return keyword: '{}'".format(keyword))
for std_ret_name in ("Yields", "Returns"):
if self.section_exists(std_ret_name):
del_sec_name = del_prefix + std_ret_name
del_sec_alias = self.SECTION_STYLE.resolve_alias(del_sec_name)
if not self.section_exists(del_sec_alias):
self.finalize_section(del_sec_alias, "")
del_sec = self.get_section(del_sec_alias)
sec = self.pop_section(std_ret_name)
del_sec.args = sec.args
return
if not self.section_exists(sec_name):
            # see if a section exists from another keyword, i.e., maybe
# this function used to return, but now it yields
for std_ret_name in ("Yields", "Returns"):
if self.section_exists(std_ret_name):
                    # necessary to recreate the section completely
# in order to use the right parser and formatter
logger.debug("old return section exists : '{}'".format(std_ret_name))
old_sec = self.pop_section(std_ret_name)
self.finalize_section(sec_name, "")
new_sec = self.get_section(sec_name)
new_sec.args = old_sec.args
self.insert_section(sec_name, new_sec)
break
if self.section_exists(sec_name):
sec = self.get_section(sec_name)
if sec.args and ret_type:
p0 = next(iter(sec.args.values()))
if p0.descr_only:
p0.description = ret_type
elif p0.types:
p0.types = ret_type
elif p0.names:
p0.names = [ret_type]
elif ret_name or ret_type:
description = default_description
sec.args = OrderedDict()
if ret_name:
sec.args[ret_name] = Parameter([ret_name], ret_type, description)
else:
sec.args[ret_type] = Parameter([ret_type], "", description)
else:
# and i ask myself, how did i get here?
pass
else:
self.finalize_section(sec_name, "")
sec = self.get_section(sec_name)
ret_type = ret_type if ret_type != "" else "${NUMBER:TYPE}"
sec.args = OrderedDict()
sec.args[ret_type] = Parameter([ret_type], "", default_description)
def update_attributes(self, attribs, alpha_order=True):
"""
Args:
params (OrderedDict): params objects keyed by their names
"""
logger.info("[NapoleonDocstring] update attributes")
self._update_section(attribs, "Attributes", alpha_order=alpha_order)
def update_exceptions(self, attribs, alpha_order=True):
"""
Args:
params (OrderedDict): params objects keyed by their names
"""
logger.info("[NapoleonDocstring] update exceptions")
self._update_section(attribs, "Raises", del_prefix="No Longer ",
alpha_order=alpha_order)
def add_dummy_returns(self, name, typ, description):
# No longer used??
if not self.section_exists("Returns"):
sec = self.SECTION_STYLE("Returns")
if name:
sec.args = {name: Parameter([name], typ, description)}
else:
sec.args = {typ: Parameter([typ], "", description)}
self.sections["Returns"] = sec
class GoogleDocstring(NapoleonDocstring):
""""""
STYLE_NAME = "google"
SECTION_STYLE = GoogleSection
SECTION_RE = r"^[A-Za-z0-9][A-Za-z0-9 \t]*:\s*$\r?\n?"
PREFERRED_PARAMS_ALIAS = "Args"
@classmethod
def detect_style(cls, docstr):
""""""
m = re.search(cls.SECTION_RE, docstr, re.MULTILINE)
return m is not None
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip().rstrip(':').rstrip()
@staticmethod
def _format_section_text(heading, body):
return "{0}:\n{1}".format(heading, body)
class NumpyDocstring(NapoleonDocstring):
""""""
STYLE_NAME = "numpy"
SECTION_STYLE = NumpySection
SECTION_RE = r"^([A-Za-z0-9][A-Za-z0-9 \t]*)\s*\n-+\s*?$\r?\n?"
PREFERRED_PARAMS_ALIAS = "Parameters"
@classmethod
def detect_style(cls, docstr):
""""""
m = re.search(cls.SECTION_RE, docstr, re.MULTILINE)
return m is not None
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip().rstrip('-').rstrip()
@staticmethod
def _format_section_text(heading, body):
return "{0}\n{1}\n{2}".format(heading, "-" * len(heading), body)
STYLE_LOOKUP = OrderedDict([('numpy', NumpyDocstring),
('google', GoogleDocstring)])
##
## EOF
##
|
@staticmethod
def finalize_param(s, tag):
"""
Args:
s (type): Description
tag (int): index of param? not fleshed out yet
"""
meta = {}
_r = r"([^,\s]+(?:\s*,\s*[^,\s]+)*\s*)(?:\((.*)\))?\s*:\s*(.*)"
m = re.match(_r, s, re.DOTALL | re.MULTILINE)
if m:
names, typ, descr = m.groups()
names = [n.strip() for n in names.split(',')]
meta['indent'], descr = dedent_verbose(descr, n=1)
descr_only = False
else:
names = ["{0}".format(tag)]
typ = ""
descr = s
descr_only = True
return Parameter(names, typ, descr, tag=tag, descr_only=descr_only, **meta)
| 327
| 347
|
# -*- coding: utf-8 -*-
"""Docstring Parsers/Formatters"""
# TODO: break this module up into smaller pieces
import sys
import re
from textwrap import dedent
from collections import OrderedDict
from itertools import islice
from .autodocstring_logging import logger
PY3k = sys.version_info[0] == 3
if PY3k:
string_types = str,
else:
string_types = basestring, # pylint: disable=undefined-variable
def make_docstring_obj(docstr, default="google", template_order=False):
"""Detect docstring style and create a Docstring object
Parameters:
docstr (str): source docstring
default (str, class): 'google', 'numpy' or subclass
of Docstring
template_order (bool, optional): iff True, reorder the
sections to match the order they appear in the template
Returns:
subclass of Docstring
"""
typ = detect_style(docstr)
logger.info("[make_docstring_obj] from {} to {}"
"".format(typ.__name__ if typ is not None else None, default.__name__))
if typ is None:
if issubclass(default, Docstring):
typ = default
else:
typ = STYLE_LOOKUP[default.lower()]
return typ(docstr, template_order=template_order)
def detect_style(docstr):
"""Detect docstr style from existing docstring
Parameters:
docstr (str): docstring whose style we want to know
Returns:
class: one of [GoogleDocstring, NumpyDocstring, None]; None
means no match
"""
docstr = dedent_docstr(docstr)
for c in STYLE_LOOKUP.values():
if c.detect_style(docstr):
return c
return None
def dedent_docstr(s, n=1):
"""Dedent all lines except first n lines
Args:
s (type): some text to dedent
n (int): number of lines to skip, (n == 0 is a normal dedent,
n == 1 is useful for whole docstrings)
"""
lines = s.splitlines(keepends=True)
if lines:
first_n_lines = "".join([l.lstrip(' \t') for l in lines[:n]])
dedented = dedent("".join(lines[n:]))
return first_n_lines + dedented
else:
return ""
def dedent_verbose(s, n=1):
new = dedent_docstr(s, n=n)
s_split = s.splitlines(keepends=True)
new_split = new.splitlines(keepends=True)
i, ind = 0, -1
for i in range(n, len(s_split)):
if s_split[i].strip():
ind = s_split[i].find(new_split[i])
break
if ind >= 0:
indent = s_split[i][:ind]
else:
indent = ""
return indent, new
def indent_docstr(s, indent, n=1, trim=True):
"""Add common indentation to all lines except first
Args:
s (str): docstring starting at indentation level 0
indent (str): text used for indentation, in practice
this will be the level of the declaration + 1
n (int): don't indent first n lines
trim (bool): trim whitespace (' \t') out of blank lines
Returns:
s with common indentation applied
"""
lines = s.splitlines(keepends=True)
for i in range(n, len(lines)):
if lines[i].strip() or not trim:
lines[i] = "{0}{1}".format(indent, lines[i])
else:
lines[i] = lines[i].strip(' \t')
return "".join(lines)
def count_leading_newlines(s):
"""count number of leading newlines
this includes newlines that are separated by other whitespace
"""
return s[:-len(s.lstrip())].count('\n')
def count_trailing_newlines(s):
"""count number of trailing newlines
this includes newlines that are separated by other whitespace
"""
return s[len(s.rstrip()):].count('\n')
def with_bounding_newlines(s, nleading=0, ntrailing=0, nl='\n'):
"""return s with at least # leading and # trailing newlines
this includes newlines that are separated by other whitespace
"""
return "{0}{1}{2}".format(nl * (nleading - count_leading_newlines(s)),
s,
nl * (ntrailing - count_trailing_newlines(s)))
def strip_newlines(s, nleading=0, ntrailing=0):
"""strip at most nleading and ntrailing newlines from s"""
    for _ in range(nleading):
        # use slices, not single-character indexing: s[0] can never equal
        # '\r\n', and indexing would raise IndexError on an empty string
        if s.lstrip(' \t')[:2] == '\r\n':
            s = s.lstrip(' \t')[2:]
        elif s.lstrip(' \t')[:1] == '\n':
            s = s.lstrip(' \t')[1:]
for _ in range(ntrailing):
if s.rstrip(' \t')[-2:] == '\r\n':
s = s.rstrip(' \t')[:-2]
elif s.rstrip(' \t')[-1:] == '\n':
s = s.rstrip(' \t')[:-1]
return s
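# Illustrative note (editorial addition): the whitespace helpers above
# compose as follows, assuming standard '\n' newlines:
#     with_bounding_newlines("x", nleading=1, ntrailing=2)  # -> "\nx\n\n"
#     strip_newlines("\n\nx\n", nleading=1, ntrailing=1)    # -> "\nx"
#     count_trailing_newlines("x\n \n")                     # -> 2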
class Parameter(object):
""""""
names = None
types = None
description = None
tag = None
descr_only = None
meta = None
def __init__(self, names, types, description, tag=None, descr_only=False,
annotated=False, **kwargs):
"""
Args:
names (list): list of names
types (str): string describing data types
description (str): description text
tag (int): some meaningful index? not fleshed out yet
descr_only (bool): only description is useful
**kwargs: Description
"""
assert names is not None
if description is None:
description = ""
self.names = names
self.types = types
self.description = description
self.tag = tag
self.descr_only = descr_only
self.annotated = annotated
self.meta = kwargs
class Section(object):
""""""
ALIASES = {}
PARSERS = {}
is_formatted = None
args = None
args_parser = None
args_formatter = None
heading = None
alias = None
_text = None
section_indent = ""
indent = " "
meta = None
formatter_override = None
def __init__(self, heading, text="", indent=None, **kwargs):
"""
Args:
heading (str): heading of the section (should be title case)
text (str, optional): section text
indent (str, optional): used by some formatters
"""
self.heading = heading
self.alias = self.resolve_alias(heading)
if self.alias in self.PARSERS:
parser, formatter = self.PARSERS[self.alias]
self.args_parser = parser
self.args_formatter = formatter
self.is_formatted = True
else:
self.is_formatted = False
if indent is not None:
self.indent = indent
self.text = text
self.meta = kwargs
logger.debug("create section '{}' ({}) with args : '{}'".format(self.heading,
self.alias,
self.args))
@classmethod
def from_section(cls, sec):
new_sec = cls(sec.alias)
new_sec._text = sec._text # pylint: disable=protected-access
# when changing styles, the indentation should change to better fit
# the new style
# new_sec.section_indent = sec.section_indent
# new_sec.indent = sec.indent
if hasattr(sec, "args"):
new_sec.args = sec.args
return new_sec
@classmethod
def resolve_alias(cls, heading):
""""""
titled_heading = heading.title()
try:
return cls.ALIASES[titled_heading]
except KeyError:
return heading
@property
def text(self):
""""""
if self.formatter_override is not None:
s = self.formatter_override(self) # pylint: disable=not-callable
elif self.args_formatter is not None:
s = self.args_formatter(self)
else:
s = self._text
return s
@text.setter
def text(self, val):
""""""
val = strip_newlines(val, ntrailing=1)
if self.args_parser is not None:
self.args = self.args_parser(self, val)
else:
section_indent, self._text = dedent_verbose(val, n=0)
# don't overwrite section indent if val isn't indented
if section_indent:
self.section_indent = section_indent
class NapoleonSection(Section):
""""""
ALIASES = {"Args": "Parameters",
"Arguments": "Parameters",
"Deleted Args": "Deleted Parameters",
"Deleted Arguments": "Deleted Parameters",
"Other Args": "Other Parameters",
"Other Arguments": "Other Parameters",
"Keyword Args": "Keyword Arguments",
"Return": "Returns",
"Yield": "Yields",
"No Longer Returns": "No Longer Returned",
"No Longer Yields": "No Longer Yielded",
"Warnings": "Warning"
}
def is_return_section(self):
return self.heading and self.heading.lower() in ('return', 'returns',
'yield', 'yields')
def param_parser_common(self, text):
# NOTE: there will be some tricky business if there is a
# section break done by "resuming unindented text"
param_list = []
param_dict = OrderedDict()
text = dedent_docstr(text, 0)
_r = r"^\S[^\r\n]*(?:\n[^\S\n]+\S[^\r\n]*|\n)*"
param_blocks = re.findall(_r, text, re.MULTILINE)
for i, block in enumerate(param_blocks):
param = self.finalize_param(block, len(param_list))
param_list.append(param)
if self.is_return_section():
param.names = [", ".join(param.names)]
param_dict[i] = param
else:
for name in param.names:
param_dict[name] = param
return param_dict
class GoogleSection(NapoleonSection):
""""""
section_indent = " "
indent = " "
@staticmethod
def finalize_param(s, tag):
"""
Args:
s (type): Description
tag (int): index of param? not fleshed out yet
"""
meta = {}
_r = r"([^,\s]+(?:\s*,\s*[^,\s]+)*\s*)(?:\((.*)\))?\s*:\s*(.*)"
m = re.match(_r, s, re.DOTALL | re.MULTILINE)
if m:
names, typ, descr = m.groups()
names = [n.strip() for n in names.split(',')]
meta['indent'], descr = dedent_verbose(descr, n=1)
descr_only = False
else:
names = ["{0}".format(tag)]
typ = ""
descr = s
descr_only = True
return Parameter(names, typ, descr, tag=tag, descr_only=descr_only, **meta)
def param_parser(self, text):
logger.info("[GoogleSection] section '{}' starts parsing".format(self.alias))
return self.param_parser_common(text)
def param_formatter(self):
""""""
logger.info("[GoogleSection] section '{}' starts formatting".format(self.alias))
s = ""
for param in self.args.values():
if param.descr_only:
s += with_bounding_newlines(param.description, ntrailing=1)
else:
if len(param.names) > 1:
logger.warn("section '{}' : Google docstrings don't allow > 1 "
"parameter per description".format(self.alias))
p = "{0}".format(", ".join(param.names))
if param.types:
types = param.types.strip()
if types:
p = "{0} ({1})".format(p, types)
if param.description:
desc = indent_docstr(param.description,
param.meta.get("indent", self.indent))
p = "{0}: {1}".format(p, desc)
s += with_bounding_newlines(p, ntrailing=1)
return s
PARSERS = {"Parameters": (param_parser,
param_formatter),
"Other Parameters": (param_parser,
param_formatter),
"Deleted Parameters": (param_parser,
param_formatter),
"Keyword Arguments": (param_parser,
param_formatter),
"Attributes": (param_parser,
param_formatter),
"Deleted Attributes": (param_parser,
param_formatter),
"Raises": (param_parser,
param_formatter),
"No Longer Raises": (param_parser,
param_formatter),
"Returns": (param_parser,
param_formatter),
"Yields": (param_parser,
param_formatter),
"No Longer Returned": (param_parser,
param_formatter),
"No Longer Yielded": (param_parser,
param_formatter),
}
class NumpySection(NapoleonSection):
""""""
indent = " "
@staticmethod
def finalize_param(s, i):
meta = {}
_r = r"\s*([^,\s]+(?:\s*,\s*[^,\s]+)*)\s*(?::\s*(.*?))?[^\S\n]*?\n(\s+.*)"
m = re.match(_r, s, re.DOTALL)
if m:
names, typ, desc = m.groups()
# FIXME hack, name for numpy parameters is always a list of names
# to support the multiple parameters per description option in
# numpy docstrings
names = [n.strip() for n in names.split(',')]
meta['indent'], descr = dedent_verbose(desc, 0)
descr_only = False
else:
names = ["{0}".format(i)]
typ = ""
descr = s
descr_only = True
return Parameter(names, typ, descr, tag=i, descr_only=descr_only, **meta)
def param_parser(self, text):
logger.info("[NumpySection] section '{}' starts parsing".format(self.alias))
return self.param_parser_common(text)
def param_formatter(self):
""""""
# NOTE: there will be some tricky business if there is a
# section break done by "resuming unindented text"
logger.info("[NumpySection] section '{}' starts formatting".format(self.alias))
s = ""
# already_seen = {}
for param in self.args.values():
if param.descr_only:
s += with_bounding_newlines(param.description, ntrailing=1)
else:
p = "{0}".format(", ".join(param.names))
if param.types:
types = param.types.strip()
if types:
p = "{0} : {1}".format(p, param.types.strip())
p = with_bounding_newlines(p, ntrailing=1)
if param.description:
p += indent_docstr(param.description,
param.meta.get("indent", self.indent),
n=0)
s += with_bounding_newlines(p, ntrailing=1)
return s
PARSERS = {"Parameters": (param_parser,
param_formatter),
"Other Parameters": (param_parser,
param_formatter),
"Deleted Parameters": (param_parser,
param_formatter),
"Keyword Arguments": (param_parser,
param_formatter),
"Attributes": (param_parser,
param_formatter),
"Deleted Attributes": (param_parser,
param_formatter),
"Raises": (param_parser,
param_formatter),
"No Longer Raises": (param_parser,
param_formatter),
"Returns": (param_parser,
param_formatter),
"Yields": (param_parser,
param_formatter),
"No Longer Returned": (param_parser,
param_formatter),
"No Longer Yielded": (param_parser,
param_formatter),
}
class Docstring(object):
"""Handle parsing / modifying / writing docstrings"""
STYLE_NAME = "none"
SECTION_STYLE = Section
TEMPLATE = OrderedDict([("Summary", None)])
PREFERRED_PARAMS_ALIAS = "Args"
sections = None
trailing_newlines = None
def __init__(self, docstr, template_order=False):
"""
Parameters:
docstr (Docstring or str): some existing docstring
template_order (bool, optional): iff True, reorder the
sections to match the order they appear in the template
"""
if isinstance(docstr, Docstring):
self.sections = docstr.sections
self.trailing_newlines = docstr.trailing_newlines
if not isinstance(docstr, type(self)):
# fixme, this is kinda hacky
make_new_sec = self.SECTION_STYLE.from_section
for sec_name, sec in docstr.sections.items():
                    # when the section should not exist
# i.e. when a section was generated, but isn't needed anymore
# e.g. when there isn't any exception raised
if sec:
docstr.sections[sec_name] = make_new_sec(sec)
else:
# deleting section that shouldn't be here
# including those generated with template_order=True
del docstr.sections[sec_name]
# ok, this way of changing indentation is a thunder hack
if "Parameters" in docstr.sections:
self.get_section("Parameters").heading = self.PREFERRED_PARAMS_ALIAS
for arg in self.get_section("Parameters").args.values():
arg.meta['indent'] = self.get_section("Parameters").indent
if "Returns" in docstr.sections:
for arg in self.get_section("Returns").args.values():
arg.meta['indent'] = self.get_section("Returns").indent
if "Yields" in docstr.sections:
for arg in self.get_section("Yields").args.values():
arg.meta['indent'] = self.get_section("Yields").indent
elif isinstance(docstr, string_types):
if template_order:
self.sections = self.TEMPLATE.copy()
else:
self.sections = OrderedDict()
self._parse(docstr)
def _parse(self, s):
"""Parse docstring into meta data
Parameters:
s (str): docstring
"""
raise NotImplementedError("_parse is an abstract method")
def format(self, top_indent):
"""Format docstring into a string
Parameters:
top_indent (str): indentation added to all but the first
lines
Returns:
str: properly formatted
"""
raise NotImplementedError("format is an abstract method")
def update_parameters(self, params):
""""""
raise NotImplementedError("update_parameters is an abstract method")
def update_return_type(self, ret_name, ret_type,
default_description="Description",
keyword="return"):
""""""
raise NotImplementedError("update_return_type is an abstract method")
def update_attributes(self, attribs, alpha_order=True):
"""
Args:
params (OrderedDict): params objects keyed by their names
"""
raise NotImplementedError("update_attributes is an abstract method")
def update_exceptions(self, attribs, alpha_order=True):
"""
Args:
params (OrderedDict): params objects keyed by their names
"""
raise NotImplementedError("update_exceptions is an abstract method")
def add_dummy_returns(self, name, typ, description):
raise NotImplementedError("add_dummy_returns is an abstract method")
def finalize_section(self, heading, text):
"""
Args:
heading (type): Description
text (type): Description
"""
section = self.SECTION_STYLE(heading, text)
self.sections[section.alias] = section
def get_section(self, section_name):
if section_name in self.sections:
return self.sections[section_name]
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
return self.sections[alias]
raise KeyError("Section '{0}' not found".format(section_name))
def pop_section(self, section_name):
if section_name in self.sections:
return self.sections.pop(section_name)
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
return self.sections.pop(alias)
raise KeyError("Section '{0}' not found".format(section_name))
def insert_section(self, section_name, section):
if section.heading != section_name:
section.heading = section_name
self.sections[section_name] = section
def section_exists(self, section_name):
"""returns True iff section exists, and was finalized"""
sec = None
if section_name in self.sections:
sec = self.sections[section_name]
elif section_name in self.SECTION_STYLE.ALIASES:
alias = self.SECTION_STYLE.resolve_alias(section_name)
if alias in self.sections:
sec = self.sections[alias]
if sec is not None:
return True
return False
class NapoleonDocstring(Docstring): # pylint: disable=abstract-method
"""Styles understood by napoleon, aka. Google/Numpy"""
STYLE_NAME = "napoleon"
TEMPLATE = OrderedDict([("Summary", None),
("Parameters", None),
("Keyword Arguments", None),
("Returns", None),
("Yields", None),
("No Longer Returned", None),
("No Longer Yielded", None),
("Other Parameters", None),
("Deleted Parameters", None),
("Attributes", None),
("Deleted Attributes", None),
("Methods", None),
("Raises", None),
("No Longer Raises", None),
("Warns", None),
("See Also", None),
("Warning", None),
("Note", None),
("Notes", None),
("References", None),
("Example", None),
("Examples", None),
])
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip()
def _parse(self, s):
"""
Args:
s (type): Description
"""
logger.info("[NapoleonDocstring] starts parsing text")
self.trailing_newlines = count_trailing_newlines(s)
s = dedent_docstr(s)
sec_starts = [(m.start(), m.end(), m.string[m.start():m.end()])
for m in re.finditer(self.SECTION_RE, s, re.MULTILINE)]
sec_starts.insert(0, (0, 0, "Summary"))
sec_starts.append((len(s), len(s), ""))
for current_sec, next_sec in zip(sec_starts[:-1], sec_starts[1:]):
sec_name = self._extract_section_name(current_sec[2])
sec_body = s[current_sec[1]:next_sec[0]]
self.finalize_section(sec_name, sec_body)
@staticmethod
def _format_section_text(heading, body):
raise NotImplementedError("This is an abstract method")
def format(self, top_indent):
"""
Args:
top_indent (type): Description
"""
logger.info("[NapoleonDocstring] starts formatting")
s = ""
if self.section_exists("Summary"):
sec_text = self.get_section("Summary").text
if sec_text.strip():
s += with_bounding_newlines(sec_text, nleading=0, ntrailing=1)
for _, section in islice(self.sections.items(), 1, None):
if section is None:
continue
sec_body = indent_docstr(section.text, section.section_indent, n=0)
sec_text = self._format_section_text(section.heading, sec_body)
s += with_bounding_newlines(sec_text, nleading=1, ntrailing=1)
if self.trailing_newlines:
s = with_bounding_newlines(s, ntrailing=self.trailing_newlines)
s = indent_docstr(s, top_indent)
return s
def _update_section(self, params, sec_name, sec_alias=None,
del_prefix="Deleted ", alpha_order=False,
other_sections=()):
"""Update section to add / remove params
As a failsafe, params that are removed are placed in a
"Deleted ..." section
Args:
params (OrderedDict): dict of Parameter objects
sec_name (str): generic section name
            sec_alias (str): section name that appears in the docstring
del_prefix (str): prefix for section that holds params that
no longer exist.
alpha_order (bool): whether or not to alphabetically sort
the params
"""
if not sec_alias:
sec_alias = sec_name
if not self.section_exists(sec_name) and len(params) == 0:
return None
elif not self.section_exists(sec_name):
self.finalize_section(sec_alias, "")
# put together which other sections exist so we can use them to
# exclude params that exist in them
_other = []
for _secname in other_sections:
if self.section_exists(_secname):
_other.append(self.get_section(_secname))
other_sections = _other
if alpha_order:
sorted_params = OrderedDict()
for k in sorted(list(params.keys()), key=str.lower):
sorted_params[k] = params[k]
params = sorted_params
current_dict = self.get_section(sec_name).args
# go through params in the order of the function declaration
# and cherry-pick from current_dict if there's already a description
# for that parameter
tags_seen = dict()
new = OrderedDict()
for name, param in params.items():
if name in current_dict:
def_param = param
param = current_dict.pop(name)
if param.tag in tags_seen:
param = None
else:
tags_seen[param.tag] = True
# update the type if annotated
if def_param.annotated:
param.types = def_param.types
else:
# if param is in one of the 'other sections', then don't
# worry about it
for sec in other_sections:
if name in sec.args:
                        # update the type if annotated
if param.annotated:
sec.args[name].types = param.types
# now ignore it
param = None
if param:
new[name] = param
# add description only parameters back in
        # iterate over a copy: popping items from current_dict while
        # iterating would raise RuntimeError under Python 3
        for key, param in list(current_dict.items()):
if param.descr_only:
# param.description = '\n' + param.description
new[key] = current_dict.pop(key)
# not sure when this guy gets created
if '' in current_dict:
del current_dict['']
        # go through params that are no longer in the arguments list and
# move them from the Parameters section of the docstring to the
# deleted parameters section
if len(current_dict):
del_sec_name = del_prefix + sec_name
del_sec_alias = del_prefix + sec_alias
logger.warn("killing parameters named: {}".format(current_dict.keys()))
            # TODO: put a switch here for other behavior?
if not self.section_exists(self.SECTION_STYLE.resolve_alias(del_sec_name)):
self.finalize_section(del_sec_name, "")
deled_params = self.get_section(del_sec_name)
deleted_tags = dict()
for key, val in current_dict.items():
if key in deled_params.args:
logger.warn("Stronger Warning: Killing old deleted param: "
"'{0}'".format(key))
val.names.remove(key)
if val.tag in deleted_tags:
deleted_tags[val.tag].names.append(key)
else:
new_val = Parameter([key], val.types, val.description)
deleted_tags[val.tag] = new_val
deled_params.args[key] = new_val
if len(new) == 0:
self.sections[sec_name] = None
else:
self.sections[sec_name].args = new
def update_parameters(self, params):
"""
Args:
params (OrderedDict): params objects keyed by their names
"""
logger.info("[NapoleonDocstring] update parameters")
        # "Keyword Arguments" is the canonical alias used by this module
        other_sections = ['Other Parameters', 'Keyword Arguments']
self._update_section(params, "Parameters", self.PREFERRED_PARAMS_ALIAS,
other_sections=other_sections)
def update_return_type(self, ret_name, ret_type,
default_description="Description",
keyword="return", del_prefix="No Longer "):
""""""
logger.info("[NapoleonDocstring] update return type")
if keyword == "yield":
sec_name = "Yields"
elif keyword == "return":
sec_name = "Returns"
else:
logger.debug("Unknown return keyword: '{}'".format(keyword))
for std_ret_name in ("Yields", "Returns"):
if self.section_exists(std_ret_name):
del_sec_name = del_prefix + std_ret_name
del_sec_alias = self.SECTION_STYLE.resolve_alias(del_sec_name)
if not self.section_exists(del_sec_alias):
self.finalize_section(del_sec_alias, "")
del_sec = self.get_section(del_sec_alias)
sec = self.pop_section(std_ret_name)
del_sec.args = sec.args
return
if not self.section_exists(sec_name):
            # see if a section exists from another keyword, i.e., maybe
# this function used to return, but now it yields
for std_ret_name in ("Yields", "Returns"):
if self.section_exists(std_ret_name):
                    # necessary to recreate the section completely
# in order to use the right parser and formatter
logger.debug("old return section exists : '{}'".format(std_ret_name))
old_sec = self.pop_section(std_ret_name)
self.finalize_section(sec_name, "")
new_sec = self.get_section(sec_name)
new_sec.args = old_sec.args
self.insert_section(sec_name, new_sec)
break
if self.section_exists(sec_name):
sec = self.get_section(sec_name)
if sec.args and ret_type:
p0 = next(iter(sec.args.values()))
if p0.descr_only:
p0.description = ret_type
elif p0.types:
p0.types = ret_type
elif p0.names:
p0.names = [ret_type]
elif ret_name or ret_type:
description = default_description
sec.args = OrderedDict()
if ret_name:
sec.args[ret_name] = Parameter([ret_name], ret_type, description)
else:
sec.args[ret_type] = Parameter([ret_type], "", description)
else:
# and i ask myself, how did i get here?
pass
else:
self.finalize_section(sec_name, "")
sec = self.get_section(sec_name)
ret_type = ret_type if ret_type != "" else "${NUMBER:TYPE}"
sec.args = OrderedDict()
sec.args[ret_type] = Parameter([ret_type], "", default_description)
def update_attributes(self, attribs, alpha_order=True):
"""
Args:
params (OrderedDict): params objects keyed by their names
"""
logger.info("[NapoleonDocstring] update attributes")
self._update_section(attribs, "Attributes", alpha_order=alpha_order)
def update_exceptions(self, attribs, alpha_order=True):
"""
Args:
params (OrderedDict): params objects keyed by their names
"""
logger.info("[NapoleonDocstring] update exceptions")
self._update_section(attribs, "Raises", del_prefix="No Longer ",
alpha_order=alpha_order)
def add_dummy_returns(self, name, typ, description):
# No longer used??
if not self.section_exists("Returns"):
sec = self.SECTION_STYLE("Returns")
if name:
sec.args = {name: Parameter([name], typ, description)}
else:
sec.args = {typ: Parameter([typ], "", description)}
self.sections["Returns"] = sec
class GoogleDocstring(NapoleonDocstring):
""""""
STYLE_NAME = "google"
SECTION_STYLE = GoogleSection
SECTION_RE = r"^[A-Za-z0-9][A-Za-z0-9 \t]*:\s*$\r?\n?"
PREFERRED_PARAMS_ALIAS = "Args"
@classmethod
def detect_style(cls, docstr):
""""""
m = re.search(cls.SECTION_RE, docstr, re.MULTILINE)
return m is not None
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip().rstrip(':').rstrip()
@staticmethod
def _format_section_text(heading, body):
return "{0}:\n{1}".format(heading, body)
class NumpyDocstring(NapoleonDocstring):
""""""
STYLE_NAME = "numpy"
SECTION_STYLE = NumpySection
SECTION_RE = r"^([A-Za-z0-9][A-Za-z0-9 \t]*)\s*\n-+\s*?$\r?\n?"
PREFERRED_PARAMS_ALIAS = "Parameters"
@classmethod
def detect_style(cls, docstr):
""""""
m = re.search(cls.SECTION_RE, docstr, re.MULTILINE)
return m is not None
@staticmethod
def _extract_section_name(sec_re_result):
return sec_re_result.strip().rstrip('-').rstrip()
@staticmethod
def _format_section_text(heading, body):
return "{0}\n{1}\n{2}".format(heading, "-" * len(heading), body)
STYLE_LOOKUP = OrderedDict([('numpy', NumpyDocstring),
('google', GoogleDocstring)])
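# Hedged usage sketch (editorial addition, not part of the original module).
# The relative import of `logger` above means this file must be imported as
# part of its package; assuming it is importable as `docstrings`:
#
#     from docstrings import detect_style, make_docstring_obj
#     raw = "Summary line.\n\nArgs:\n    x (int): some integer\n"
#     detect_style(raw)                     # -> GoogleDocstring (the class)
#     doc = make_docstring_obj(raw, default="google")
#     print(doc.format("    "))             # round-trips, indented one level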
##
## EOF
##
|
get_transport_class
|
Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v10.services.types import feed_service
from google.rpc import status_pb2 # type: ignore
from .transports.base import FeedServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import FeedServiceGrpcTransport
class FeedServiceClientMeta(type):
"""Metaclass for the FeedService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[FeedServiceTransport]]
_transport_registry["grpc"] = FeedServiceGrpcTransport
# MASKED: get_transport_class function (lines 55-73)
class FeedServiceClient(metaclass=FeedServiceClientMeta):
"""Service to manage feeds."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
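    # Illustrative conversions (editorial addition) for the helper above:
    #     "googleads.googleapis.com"         -> "googleads.mtls.googleapis.com"
    #     "googleads.sandbox.googleapis.com" -> "googleads.mtls.sandbox.googleapis.com"
    #     "localhost:8080"                   -> unchanged (no googleapis domain)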
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeedServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeedServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> FeedServiceTransport:
"""Returns the transport used by the client instance.
Returns:
FeedServiceTransport: The transport used by the client
instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def feed_path(customer_id: str, feed_id: str,) -> str:
"""Returns a fully-qualified feed string."""
return "customers/{customer_id}/feeds/{feed_id}".format(
customer_id=customer_id, feed_id=feed_id,
)
@staticmethod
def parse_feed_path(path: str) -> Dict[str, str]:
"""Parses a feed path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/feeds/(?P<feed_id>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, FeedServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the feed service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, FeedServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, FeedServiceTransport):
# transport is a FeedServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def mutate_feeds(
self,
request: Union[feed_service.MutateFeedsRequest, dict] = None,
*,
customer_id: str = None,
operations: Sequence[feed_service.FeedOperation] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> feed_service.MutateFeedsResponse:
r"""Creates, updates, or removes feeds. Operation statuses are
returned.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `CollectionSizeError <>`__
`DatabaseError <>`__ `DistinctError <>`__ `FeedError <>`__
`FieldError <>`__ `FieldMaskError <>`__ `HeaderError <>`__
`IdError <>`__ `InternalError <>`__ `ListOperationError <>`__
`MutateError <>`__ `NewResourceCreationError <>`__
`NotEmptyError <>`__ `NullError <>`__ `OperatorError <>`__
`QuotaError <>`__ `RangeError <>`__ `RequestError <>`__
`ResourceCountLimitExceededError <>`__ `SizeLimitError <>`__
`StringFormatError <>`__ `StringLengthError <>`__
Args:
request (Union[google.ads.googleads.v10.services.types.MutateFeedsRequest, dict]):
The request object. Request message for
[FeedService.MutateFeeds][google.ads.googleads.v10.services.FeedService.MutateFeeds].
customer_id (str):
Required. The ID of the customer
whose feeds are being modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (Sequence[google.ads.googleads.v10.services.types.FeedOperation]):
Required. The list of operations to
perform on individual feeds.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v10.services.types.MutateFeedsResponse:
                Response message for a feed mutate.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([customer_id, operations])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a feed_service.MutateFeedsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, feed_service.MutateFeedsRequest):
request = feed_service.MutateFeedsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.mutate_feeds]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("FeedServiceClient",)
|
def get_transport_class(
cls, label: str = None,
) -> Type[FeedServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
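A minimal illustration of the lookup above: since "grpc" is the only entry in the (ordered) registry, an explicit request and the no-label default both resolve to the same transport class.
# Both lookups yield FeedServiceGrpcTransport: "grpc" is the only
# registered label, so it is also the first (default) entry.
assert FeedServiceClient.get_transport_class("grpc") is (
    FeedServiceClient.get_transport_class()
)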
| 55
| 73
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v10.services.types import feed_service
from google.rpc import status_pb2 # type: ignore
from .transports.base import FeedServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import FeedServiceGrpcTransport
class FeedServiceClientMeta(type):
"""Metaclass for the FeedService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[FeedServiceTransport]]
_transport_registry["grpc"] = FeedServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[FeedServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class FeedServiceClient(metaclass=FeedServiceClientMeta):
"""Service to manage feeds."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeedServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeedServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> FeedServiceTransport:
"""Returns the transport used by the client instance.
Returns:
FeedServiceTransport: The transport used by the client
instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def feed_path(customer_id: str, feed_id: str,) -> str:
"""Returns a fully-qualified feed string."""
return "customers/{customer_id}/feeds/{feed_id}".format(
customer_id=customer_id, feed_id=feed_id,
)
@staticmethod
def parse_feed_path(path: str) -> Dict[str, str]:
"""Parses a feed path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/feeds/(?P<feed_id>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, FeedServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the feed service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, FeedServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, FeedServiceTransport):
# transport is a FeedServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def mutate_feeds(
self,
request: Union[feed_service.MutateFeedsRequest, dict] = None,
*,
customer_id: str = None,
operations: Sequence[feed_service.FeedOperation] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> feed_service.MutateFeedsResponse:
r"""Creates, updates, or removes feeds. Operation statuses are
returned.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `CollectionSizeError <>`__
`DatabaseError <>`__ `DistinctError <>`__ `FeedError <>`__
`FieldError <>`__ `FieldMaskError <>`__ `HeaderError <>`__
`IdError <>`__ `InternalError <>`__ `ListOperationError <>`__
`MutateError <>`__ `NewResourceCreationError <>`__
`NotEmptyError <>`__ `NullError <>`__ `OperatorError <>`__
`QuotaError <>`__ `RangeError <>`__ `RequestError <>`__
`ResourceCountLimitExceededError <>`__ `SizeLimitError <>`__
`StringFormatError <>`__ `StringLengthError <>`__
Args:
request (Union[google.ads.googleads.v10.services.types.MutateFeedsRequest, dict]):
The request object. Request message for
[FeedService.MutateFeeds][google.ads.googleads.v10.services.FeedService.MutateFeeds].
customer_id (str):
Required. The ID of the customer
whose feeds are being modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (Sequence[google.ads.googleads.v10.services.types.FeedOperation]):
Required. The list of operations to
perform on individual feeds.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v10.services.types.MutateFeedsResponse:
Response message for a feed mutate.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([customer_id, operations])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a feed_service.MutateFeedsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, feed_service.MutateFeedsRequest):
request = feed_service.MutateFeedsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.mutate_feeds]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("FeedServiceClient",)
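As a quick sketch of the resource-path helpers defined above: the builders and parsers are inverses of each other (the IDs below are placeholders).
# Round trip through feed_path / parse_feed_path with hypothetical IDs.
path = FeedServiceClient.feed_path("1234567890", "42")
assert path == "customers/1234567890/feeds/42"
assert FeedServiceClient.parse_feed_path(path) == {
    "customer_id": "1234567890",
    "feed_id": "42",
}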
|
_get_default_mtls_endpoint
|
Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v10.services.types import feed_service
from google.rpc import status_pb2 # type: ignore
from .transports.base import FeedServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import FeedServiceGrpcTransport
class FeedServiceClientMeta(type):
"""Metaclass for the FeedService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[FeedServiceTransport]]
_transport_registry["grpc"] = FeedServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[FeedServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class FeedServiceClient(metaclass=FeedServiceClientMeta):
"""Service to manage feeds."""
# MASKED: _get_default_mtls_endpoint function (lines 79-107)
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeedServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeedServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> FeedServiceTransport:
"""Returns the transport used by the client instance.
Returns:
FeedServiceTransport: The transport used by the client
instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def feed_path(customer_id: str, feed_id: str,) -> str:
"""Returns a fully-qualified feed string."""
return "customers/{customer_id}/feeds/{feed_id}".format(
customer_id=customer_id, feed_id=feed_id,
)
@staticmethod
def parse_feed_path(path: str) -> Dict[str, str]:
"""Parses a feed path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/feeds/(?P<feed_id>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, FeedServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the feed service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, FeedServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, FeedServiceTransport):
# transport is a FeedServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def mutate_feeds(
self,
request: Union[feed_service.MutateFeedsRequest, dict] = None,
*,
customer_id: str = None,
operations: Sequence[feed_service.FeedOperation] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> feed_service.MutateFeedsResponse:
r"""Creates, updates, or removes feeds. Operation statuses are
returned.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `CollectionSizeError <>`__
`DatabaseError <>`__ `DistinctError <>`__ `FeedError <>`__
`FieldError <>`__ `FieldMaskError <>`__ `HeaderError <>`__
`IdError <>`__ `InternalError <>`__ `ListOperationError <>`__
`MutateError <>`__ `NewResourceCreationError <>`__
`NotEmptyError <>`__ `NullError <>`__ `OperatorError <>`__
`QuotaError <>`__ `RangeError <>`__ `RequestError <>`__
`ResourceCountLimitExceededError <>`__ `SizeLimitError <>`__
`StringFormatError <>`__ `StringLengthError <>`__
Args:
request (Union[google.ads.googleads.v10.services.types.MutateFeedsRequest, dict]):
The request object. Request message for
[FeedService.MutateFeeds][google.ads.googleads.v10.services.FeedService.MutateFeeds].
customer_id (str):
Required. The ID of the customer
whose feeds are being modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (Sequence[google.ads.googleads.v10.services.types.FeedOperation]):
Required. The list of operations to
perform on individual feeds.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v10.services.types.MutateFeedsResponse:
Response message for a feed mutate.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([customer_id, operations])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a feed_service.MutateFeedsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, feed_service.MutateFeedsRequest):
request = feed_service.MutateFeedsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.mutate_feeds]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("FeedServiceClient",)
|
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
| 79
| 107
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v10.services.types import feed_service
from google.rpc import status_pb2 # type: ignore
from .transports.base import FeedServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import FeedServiceGrpcTransport
class FeedServiceClientMeta(type):
"""Metaclass for the FeedService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[FeedServiceTransport]]
_transport_registry["grpc"] = FeedServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[FeedServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class FeedServiceClient(metaclass=FeedServiceClientMeta):
"""Service to manage feeds."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeedServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeedServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> FeedServiceTransport:
"""Returns the transport used by the client instance.
Returns:
FeedServiceTransport: The transport used by the client
instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def feed_path(customer_id: str, feed_id: str,) -> str:
"""Returns a fully-qualified feed string."""
return "customers/{customer_id}/feeds/{feed_id}".format(
customer_id=customer_id, feed_id=feed_id,
)
@staticmethod
def parse_feed_path(path: str) -> Dict[str, str]:
"""Parses a feed path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/feeds/(?P<feed_id>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, FeedServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the feed service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, FeedServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, FeedServiceTransport):
# transport is a FeedServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def mutate_feeds(
self,
request: Union[feed_service.MutateFeedsRequest, dict] = None,
*,
customer_id: str = None,
operations: Sequence[feed_service.FeedOperation] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> feed_service.MutateFeedsResponse:
r"""Creates, updates, or removes feeds. Operation statuses are
returned.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `CollectionSizeError <>`__
`DatabaseError <>`__ `DistinctError <>`__ `FeedError <>`__
`FieldError <>`__ `FieldMaskError <>`__ `HeaderError <>`__
`IdError <>`__ `InternalError <>`__ `ListOperationError <>`__
`MutateError <>`__ `NewResourceCreationError <>`__
`NotEmptyError <>`__ `NullError <>`__ `OperatorError <>`__
`QuotaError <>`__ `RangeError <>`__ `RequestError <>`__
`ResourceCountLimitExceededError <>`__ `SizeLimitError <>`__
`StringFormatError <>`__ `StringLengthError <>`__
Args:
request (Union[google.ads.googleads.v10.services.types.MutateFeedsRequest, dict]):
The request object. Request message for
[FeedService.MutateFeeds][google.ads.googleads.v10.services.FeedService.MutateFeeds].
customer_id (str):
Required. The ID of the customer
whose feeds are being modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (Sequence[google.ads.googleads.v10.services.types.FeedOperation]):
Required. The list of operations to
perform on individual feeds.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v10.services.types.MutateFeedsResponse:
Response message for a feed mutate.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([customer_id, operations])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a feed_service.MutateFeedsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, feed_service.MutateFeedsRequest):
request = feed_service.MutateFeedsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.mutate_feeds]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("FeedServiceClient",)
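The client can also be used as a context manager, subject to the shared-transport caveat in __exit__ above; a hedged sketch, assuming a hypothetical service-account key path.
# Illustrative only: "key.json" is a placeholder. Leaving the block closes
# the underlying transport, so do not share it with other clients.
with FeedServiceClient.from_service_account_file("key.json") as client:
    transport = client.transport  # the active FeedServiceTransport instance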
|
from_service_account_info
|
Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeedServiceClient: The constructed client.
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v10.services.types import feed_service
from google.rpc import status_pb2 # type: ignore
from .transports.base import FeedServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import FeedServiceGrpcTransport
class FeedServiceClientMeta(type):
"""Metaclass for the FeedService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[FeedServiceTransport]]
_transport_registry["grpc"] = FeedServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[FeedServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class FeedServiceClient(metaclass=FeedServiceClientMeta):
"""Service to manage feeds."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
# MASKED: from_service_account_info function (lines 114-131)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeedServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> FeedServiceTransport:
"""Returns the transport used by the client instance.
Returns:
FeedServiceTransport: The transport used by the client
instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def feed_path(customer_id: str, feed_id: str,) -> str:
"""Returns a fully-qualified feed string."""
return "customers/{customer_id}/feeds/{feed_id}".format(
customer_id=customer_id, feed_id=feed_id,
)
@staticmethod
def parse_feed_path(path: str) -> Dict[str, str]:
"""Parses a feed path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/feeds/(?P<feed_id>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, FeedServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the feed service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, FeedServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, FeedServiceTransport):
# transport is a FeedServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def mutate_feeds(
self,
request: Union[feed_service.MutateFeedsRequest, dict] = None,
*,
customer_id: str = None,
operations: Sequence[feed_service.FeedOperation] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> feed_service.MutateFeedsResponse:
r"""Creates, updates, or removes feeds. Operation statuses are
returned.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `CollectionSizeError <>`__
`DatabaseError <>`__ `DistinctError <>`__ `FeedError <>`__
`FieldError <>`__ `FieldMaskError <>`__ `HeaderError <>`__
`IdError <>`__ `InternalError <>`__ `ListOperationError <>`__
`MutateError <>`__ `NewResourceCreationError <>`__
`NotEmptyError <>`__ `NullError <>`__ `OperatorError <>`__
`QuotaError <>`__ `RangeError <>`__ `RequestError <>`__
`ResourceCountLimitExceededError <>`__ `SizeLimitError <>`__
`StringFormatError <>`__ `StringLengthError <>`__
Args:
request (Union[google.ads.googleads.v10.services.types.MutateFeedsRequest, dict]):
The request object. Request message for
[FeedService.MutateFeeds][google.ads.googleads.v10.services.FeedService.MutateFeeds].
customer_id (str):
Required. The ID of the customer
whose feeds are being modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (Sequence[google.ads.googleads.v10.services.types.FeedOperation]):
Required. The list of operations to
perform on individual feeds.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v10.services.types.MutateFeedsResponse:
                Response message for a feed mutate.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([customer_id, operations])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a feed_service.MutateFeedsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, feed_service.MutateFeedsRequest):
request = feed_service.MutateFeedsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.mutate_feeds]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
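# A hedged usage sketch for mutate_feeds with flattened arguments (the
# operation construction below is illustrative, not taken from this file):
#
#     client = FeedServiceClient()
#     operation = feed_service.FeedOperation()
#     operation.remove = client.feed_path("1234567890", "42")
#     response = client.mutate_feeds(
#         customer_id="1234567890", operations=[operation]
#     )
#     for result in response.results:
#         print(result.resource_name)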
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("FeedServiceClient",)
|
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeedServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
| 114
| 131
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v10.services.types import feed_service
from google.rpc import status_pb2 # type: ignore
from .transports.base import FeedServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import FeedServiceGrpcTransport
class FeedServiceClientMeta(type):
"""Metaclass for the FeedService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[FeedServiceTransport]]
_transport_registry["grpc"] = FeedServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[FeedServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class FeedServiceClient(metaclass=FeedServiceClientMeta):
"""Service to manage feeds."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
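    # Illustrative conversions performed by the helper above, derived from its
    # regex and replacements (no additional behavior is implied):
    #
    #     "googleads.googleapis.com"          -> "googleads.mtls.googleapis.com"
    #     "googleads.sandbox.googleapis.com"  -> "googleads.mtls.sandbox.googleapis.com"
    #     "localhost:8080"  (no googleapis.com suffix) -> returned unchanged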
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeedServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeedServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
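    # Illustrative use of the factory above (the file path is a placeholder):
    #
    #     client = FeedServiceClient.from_service_account_file(
    #         "/path/to/service-account.json"
    #     )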
from_service_account_json = from_service_account_file
@property
def transport(self) -> FeedServiceTransport:
"""Returns the transport used by the client instance.
Returns:
FeedServiceTransport: The transport used by the client
instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def feed_path(customer_id: str, feed_id: str,) -> str:
"""Returns a fully-qualified feed string."""
return "customers/{customer_id}/feeds/{feed_id}".format(
customer_id=customer_id, feed_id=feed_id,
)
@staticmethod
def parse_feed_path(path: str) -> Dict[str, str]:
"""Parses a feed path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/feeds/(?P<feed_id>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, FeedServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the feed service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, FeedServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, FeedServiceTransport):
# transport is a FeedServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
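    # A hedged construction sketch: overriding the endpoint through
    # ClientOptions, as described in the docstring above (the endpoint value
    # is illustrative):
    #
    #     options = client_options_lib.ClientOptions(
    #         api_endpoint="googleads.googleapis.com"
    #     )
    #     client = FeedServiceClient(client_options=options)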
def mutate_feeds(
self,
request: Union[feed_service.MutateFeedsRequest, dict] = None,
*,
customer_id: str = None,
operations: Sequence[feed_service.FeedOperation] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> feed_service.MutateFeedsResponse:
r"""Creates, updates, or removes feeds. Operation statuses are
returned.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `CollectionSizeError <>`__
`DatabaseError <>`__ `DistinctError <>`__ `FeedError <>`__
`FieldError <>`__ `FieldMaskError <>`__ `HeaderError <>`__
`IdError <>`__ `InternalError <>`__ `ListOperationError <>`__
`MutateError <>`__ `NewResourceCreationError <>`__
`NotEmptyError <>`__ `NullError <>`__ `OperatorError <>`__
`QuotaError <>`__ `RangeError <>`__ `RequestError <>`__
`ResourceCountLimitExceededError <>`__ `SizeLimitError <>`__
`StringFormatError <>`__ `StringLengthError <>`__
Args:
request (Union[google.ads.googleads.v10.services.types.MutateFeedsRequest, dict]):
The request object. Request message for
[FeedService.MutateFeeds][google.ads.googleads.v10.services.FeedService.MutateFeeds].
customer_id (str):
Required. The ID of the customer
whose feeds are being modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (Sequence[google.ads.googleads.v10.services.types.FeedOperation]):
Required. The list of operations to
perform on individual feeds.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v10.services.types.MutateFeedsResponse:
                Response message for a feed mutate.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([customer_id, operations])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a feed_service.MutateFeedsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, feed_service.MutateFeedsRequest):
request = feed_service.MutateFeedsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.mutate_feeds]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("FeedServiceClient",)
|
from_service_account_file
|
Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeedServiceClient: The constructed client.
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v10.services.types import feed_service
from google.rpc import status_pb2 # type: ignore
from .transports.base import FeedServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import FeedServiceGrpcTransport
class FeedServiceClientMeta(type):
"""Metaclass for the FeedService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[FeedServiceTransport]]
_transport_registry["grpc"] = FeedServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[FeedServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
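# Transport resolution sketch, following the registry logic above: passing
# "grpc" (or nothing, since gRPC is the only registered entry) yields the gRPC
# transport class.
#
#     FeedServiceClient.get_transport_class("grpc")  # -> FeedServiceGrpcTransport
#     FeedServiceClient.get_transport_class()        # -> first (default) entry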
class FeedServiceClient(metaclass=FeedServiceClientMeta):
"""Service to manage feeds."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeedServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
# MASKED: from_service_account_file function (lines 133-151)
from_service_account_json = from_service_account_file
@property
def transport(self) -> FeedServiceTransport:
"""Returns the transport used by the client instance.
Returns:
FeedServiceTransport: The transport used by the client
instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def feed_path(customer_id: str, feed_id: str,) -> str:
"""Returns a fully-qualified feed string."""
return "customers/{customer_id}/feeds/{feed_id}".format(
customer_id=customer_id, feed_id=feed_id,
)
@staticmethod
def parse_feed_path(path: str) -> Dict[str, str]:
"""Parses a feed path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/feeds/(?P<feed_id>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, FeedServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the feed service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, FeedServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, FeedServiceTransport):
# transport is a FeedServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def mutate_feeds(
self,
request: Union[feed_service.MutateFeedsRequest, dict] = None,
*,
customer_id: str = None,
operations: Sequence[feed_service.FeedOperation] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> feed_service.MutateFeedsResponse:
r"""Creates, updates, or removes feeds. Operation statuses are
returned.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `CollectionSizeError <>`__
`DatabaseError <>`__ `DistinctError <>`__ `FeedError <>`__
`FieldError <>`__ `FieldMaskError <>`__ `HeaderError <>`__
`IdError <>`__ `InternalError <>`__ `ListOperationError <>`__
`MutateError <>`__ `NewResourceCreationError <>`__
`NotEmptyError <>`__ `NullError <>`__ `OperatorError <>`__
`QuotaError <>`__ `RangeError <>`__ `RequestError <>`__
`ResourceCountLimitExceededError <>`__ `SizeLimitError <>`__
`StringFormatError <>`__ `StringLengthError <>`__
Args:
request (Union[google.ads.googleads.v10.services.types.MutateFeedsRequest, dict]):
The request object. Request message for
[FeedService.MutateFeeds][google.ads.googleads.v10.services.FeedService.MutateFeeds].
customer_id (str):
Required. The ID of the customer
whose feeds are being modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (Sequence[google.ads.googleads.v10.services.types.FeedOperation]):
Required. The list of operations to
perform on individual feeds.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v10.services.types.MutateFeedsResponse:
                Response message for a feed mutate.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([customer_id, operations])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a feed_service.MutateFeedsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, feed_service.MutateFeedsRequest):
request = feed_service.MutateFeedsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.mutate_feeds]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("FeedServiceClient",)
|
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeedServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
| 133
| 151
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v10.services.types import feed_service
from google.rpc import status_pb2 # type: ignore
from .transports.base import FeedServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import FeedServiceGrpcTransport
class FeedServiceClientMeta(type):
"""Metaclass for the FeedService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[FeedServiceTransport]]
_transport_registry["grpc"] = FeedServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[FeedServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class FeedServiceClient(metaclass=FeedServiceClientMeta):
"""Service to manage feeds."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeedServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeedServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> FeedServiceTransport:
"""Returns the transport used by the client instance.
Returns:
FeedServiceTransport: The transport used by the client
instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def feed_path(customer_id: str, feed_id: str,) -> str:
"""Returns a fully-qualified feed string."""
return "customers/{customer_id}/feeds/{feed_id}".format(
customer_id=customer_id, feed_id=feed_id,
)
@staticmethod
def parse_feed_path(path: str) -> Dict[str, str]:
"""Parses a feed path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/feeds/(?P<feed_id>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, FeedServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the feed service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, FeedServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, FeedServiceTransport):
# transport is a FeedServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def mutate_feeds(
self,
request: Union[feed_service.MutateFeedsRequest, dict] = None,
*,
customer_id: str = None,
operations: Sequence[feed_service.FeedOperation] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> feed_service.MutateFeedsResponse:
r"""Creates, updates, or removes feeds. Operation statuses are
returned.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `CollectionSizeError <>`__
`DatabaseError <>`__ `DistinctError <>`__ `FeedError <>`__
`FieldError <>`__ `FieldMaskError <>`__ `HeaderError <>`__
`IdError <>`__ `InternalError <>`__ `ListOperationError <>`__
`MutateError <>`__ `NewResourceCreationError <>`__
`NotEmptyError <>`__ `NullError <>`__ `OperatorError <>`__
`QuotaError <>`__ `RangeError <>`__ `RequestError <>`__
`ResourceCountLimitExceededError <>`__ `SizeLimitError <>`__
`StringFormatError <>`__ `StringLengthError <>`__
Args:
request (Union[google.ads.googleads.v10.services.types.MutateFeedsRequest, dict]):
The request object. Request message for
[FeedService.MutateFeeds][google.ads.googleads.v10.services.FeedService.MutateFeeds].
customer_id (str):
Required. The ID of the customer
whose feeds are being modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (Sequence[google.ads.googleads.v10.services.types.FeedOperation]):
Required. The list of operations to
perform on individual feeds.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v10.services.types.MutateFeedsResponse:
                Response message for a feed mutate.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([customer_id, operations])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a feed_service.MutateFeedsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, feed_service.MutateFeedsRequest):
request = feed_service.MutateFeedsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.mutate_feeds]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("FeedServiceClient",)
|
setup_task
|
Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
|
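A plausible sketch of the masked setup_task classmethod, following common fairseq translation-task conventions; the load_dictionary and infer_language_pair helpers are assumed from fairseq and are not shown in this excerpt:

@classmethod
def setup_task(cls, args, **kwargs):
    """Setup the task (e.g., load dictionaries)."""
    paths = utils.split_paths(args.data)
    assert len(paths) > 0
    # Infer the language pair from the data directory when not given.
    if args.source_lang is None or args.target_lang is None:
        args.source_lang, args.target_lang = data_utils.infer_language_pair(paths[0])
    if args.source_lang is None or args.target_lang is None:
        raise Exception("Could not infer language pair, please provide it explicitly")
    # Load the source and target dictionaries.
    src_dict = cls.load_dictionary(
        os.path.join(paths[0], "dict.{}.txt".format(args.source_lang))
    )
    tgt_dict = cls.load_dictionary(
        os.path.join(paths[0], "dict.{}.txt".format(args.target_lang))
    )
    # Both dictionaries should share the same special symbols.
    assert src_dict.pad() == tgt_dict.pad()
    assert src_dict.eos() == tgt_dict.eos()
    assert src_dict.unk() == tgt_dict.unk()
    logger.info("[{}] dictionary: {} types".format(args.source_lang, len(src_dict)))
    logger.info("[{}] dictionary: {} types".format(args.target_lang, len(tgt_dict)))
    return cls(args, src_dict, tgt_dict)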
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import json
import logging
import os
from argparse import Namespace
import numpy as np
from fairseq import metrics, options, utils
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
LanguagePairDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
data_utils,
encoders,
indexed_dataset,
)
from fairseq.tasks import LegacyFairseqTask, register_task
EVAL_BLEU_ORDER = 4
logger = logging.getLogger(__name__)
def load_langpair_dataset(
data_path,
split,
src,
src_dict,
tgt,
tgt_dict,
combine,
dataset_impl,
upsample_primary,
left_pad_source,
left_pad_target,
max_source_positions,
max_target_positions,
prepend_bos=False,
load_alignments=False,
truncate_source=False,
append_source_id=False,
num_buckets=0,
shuffle=True,
pad_to_multiple=1,
):
def split_exists(split, src, tgt, lang, data_path):
filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
src_datasets = []
tgt_datasets = []
for k in itertools.count():
split_k = split + (str(k) if k > 0 else "")
# infer langcode
if split_exists(split_k, src, tgt, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt))
elif split_exists(split_k, tgt, src, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src))
else:
if k > 0:
break
else:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, data_path)
)
src_dataset = data_utils.load_indexed_dataset(
prefix + src, src_dict, dataset_impl
)
if truncate_source:
src_dataset = AppendTokenDataset(
TruncateDataset(
StripTokenDataset(src_dataset, src_dict.eos()),
max_source_positions - 1,
),
src_dict.eos(),
)
src_datasets.append(src_dataset)
tgt_dataset = data_utils.load_indexed_dataset(
prefix + tgt, tgt_dict, dataset_impl
)
if tgt_dataset is not None:
tgt_datasets.append(tgt_dataset)
logger.info(
"{} {} {}-{} {} examples".format(
data_path, split_k, src, tgt, len(src_datasets[-1])
)
)
if not combine:
break
assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
if len(src_datasets) == 1:
src_dataset = src_datasets[0]
tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
else:
sample_ratios = [1] * len(src_datasets)
sample_ratios[0] = upsample_primary
src_dataset = ConcatDataset(src_datasets, sample_ratios)
if len(tgt_datasets) > 0:
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
else:
tgt_dataset = None
if prepend_bos:
assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
if tgt_dataset is not None:
tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
eos = None
if append_source_id:
src_dataset = AppendTokenDataset(
src_dataset, src_dict.index("[{}]".format(src))
)
if tgt_dataset is not None:
tgt_dataset = AppendTokenDataset(
tgt_dataset, tgt_dict.index("[{}]".format(tgt))
)
eos = tgt_dict.index("[{}]".format(tgt))
align_dataset = None
if load_alignments:
align_path = os.path.join(data_path, "{}.align.{}-{}".format(split, src, tgt))
if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
align_dataset = data_utils.load_indexed_dataset(
align_path, None, dataset_impl
)
tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
return LanguagePairDataset(
src_dataset,
src_dataset.sizes,
src_dict,
tgt_dataset,
tgt_dataset_sizes,
tgt_dict,
left_pad_source=left_pad_source,
left_pad_target=left_pad_target,
align_dataset=align_dataset,
eos=eos,
num_buckets=num_buckets,
shuffle=shuffle,
pad_to_multiple=pad_to_multiple,
)
@register_task("translation")
class TranslationTask(LegacyFairseqTask):
"""
Translate from one (source) language to another (target) language.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
        parser.add_argument('data', help='colon-separated list of data directories, \
                            iterated over in round-robin order across epochs; \
                            valid and test data are always taken from the first \
                            directory, so they need not be repeated in each one')
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
help='source language')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
help='target language')
parser.add_argument('--load-alignments', action='store_true',
help='load the binarized alignments')
parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
help='pad the source on the left')
parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
help='pad the target on the left')
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence')
parser.add_argument('--upsample-primary', default=1, type=int,
help='amount to upsample primary dataset')
parser.add_argument('--truncate-source', action='store_true', default=False,
help='truncate source to max-source-positions')
parser.add_argument('--num-batch-buckets', default=0, type=int, metavar='N',
help='if >0, then bucket source and target lengths into N '
'buckets and pad accordingly; this is useful on TPUs '
'to minimize the number of compilations')
# options for reporting BLEU during validation
parser.add_argument('--eval-bleu', action='store_true',
help='evaluation with BLEU scores')
parser.add_argument('--eval-bleu-detok', type=str, default="space",
help='detokenize before computing BLEU (e.g., "moses"); '
'required if using --eval-bleu; use "space" to '
'disable detokenization; see fairseq.data.encoders '
'for other options')
parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON',
help='args for building the tokenizer, if needed')
parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False,
help='compute tokenized BLEU instead of sacrebleu')
parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None,
help='remove BPE before computing BLEU')
parser.add_argument('--eval-bleu-args', type=str, metavar='JSON',
                            help='generation args for BLEU scoring, '
'e.g., \'{"beam": 4, "lenpen": 0.6}\'')
parser.add_argument('--eval-bleu-print-samples', action='store_true',
help='print sample generations during validation')
# fmt: on
def __init__(self, args, src_dict, tgt_dict):
super().__init__(args)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
# MASKED: setup_task function (lines 243-278)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
if split != getattr(self.args, "train_subset", None):
# if not training data set, use the first shard for valid and test
paths = paths[:1]
data_path = paths[(epoch - 1) % len(paths)]
# infer langcode
src, tgt = self.args.source_lang, self.args.target_lang
self.datasets[split] = load_langpair_dataset(
data_path,
split,
src,
self.src_dict,
tgt,
self.tgt_dict,
combine=combine,
dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions,
load_alignments=self.args.load_alignments,
truncate_source=self.args.truncate_source,
num_buckets=self.args.num_batch_buckets,
shuffle=(split != "test"),
pad_to_multiple=self.args.required_seq_len_multiple,
)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
return LanguagePairDataset(
src_tokens,
src_lengths,
self.source_dictionary,
tgt_dict=self.target_dictionary,
constraints=constraints,
)
def build_model(self, args):
model = super().build_model(args)
if getattr(args, "eval_bleu", False):
assert getattr(args, "eval_bleu_detok", None) is not None, (
"--eval-bleu-detok is required if using --eval-bleu; "
"try --eval-bleu-detok=moses (or --eval-bleu-detok=space "
"to disable detokenization, e.g., when using sentencepiece)"
)
detok_args = json.loads(getattr(args, "eval_bleu_detok_args", "{}") or "{}")
self.tokenizer = encoders.build_tokenizer(
Namespace(
tokenizer=getattr(args, "eval_bleu_detok", None), **detok_args
)
)
gen_args = json.loads(getattr(args, "eval_bleu_args", "{}") or "{}")
self.sequence_generator = self.build_generator(
[model], Namespace(**gen_args)
)
return model
def valid_step(self, sample, model, criterion):
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
if self.args.eval_bleu:
bleu = self._inference_with_bleu(self.sequence_generator, sample, model)
logging_output["_bleu_sys_len"] = bleu.sys_len
logging_output["_bleu_ref_len"] = bleu.ref_len
# we split counts into separate entries so that they can be
# summed efficiently across workers using fast-stat-sync
assert len(bleu.counts) == EVAL_BLEU_ORDER
for i in range(EVAL_BLEU_ORDER):
logging_output["_bleu_counts_" + str(i)] = bleu.counts[i]
logging_output["_bleu_totals_" + str(i)] = bleu.totals[i]
return loss, sample_size, logging_output
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
if self.args.eval_bleu:
def sum_logs(key):
return sum(log.get(key, 0) for log in logging_outputs)
counts, totals = [], []
for i in range(EVAL_BLEU_ORDER):
counts.append(sum_logs("_bleu_counts_" + str(i)))
totals.append(sum_logs("_bleu_totals_" + str(i)))
if max(totals) > 0:
# log counts as numpy arrays -- log_scalar will sum them correctly
metrics.log_scalar("_bleu_counts", np.array(counts))
metrics.log_scalar("_bleu_totals", np.array(totals))
metrics.log_scalar("_bleu_sys_len", sum_logs("_bleu_sys_len"))
metrics.log_scalar("_bleu_ref_len", sum_logs("_bleu_ref_len"))
def compute_bleu(meters):
import inspect
import sacrebleu
fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0]
if "smooth_method" in fn_sig:
smooth = {"smooth_method": "exp"}
else:
smooth = {"smooth": "exp"}
bleu = sacrebleu.compute_bleu(
correct=meters["_bleu_counts"].sum,
total=meters["_bleu_totals"].sum,
sys_len=meters["_bleu_sys_len"].sum,
ref_len=meters["_bleu_ref_len"].sum,
**smooth
)
return round(bleu.score, 2)
metrics.log_derived("bleu", compute_bleu)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary`."""
return self.src_dict
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary`."""
return self.tgt_dict
def _inference_with_bleu(self, generator, sample, model):
import sacrebleu
def decode(toks, escape_unk=False):
s = self.tgt_dict.string(
toks.int().cpu(),
self.args.eval_bleu_remove_bpe,
# The default unknown string in fairseq is `<unk>`, but
# this is tokenized by sacrebleu as `< unk >`, inflating
# BLEU scores. Instead, we use a somewhat more verbose
# alternative that is unlikely to appear in the real
# reference, but doesn't get split into multiple tokens.
unk_string=("UNKNOWNTOKENINREF" if escape_unk else "UNKNOWNTOKENINHYP"),
)
if self.tokenizer:
s = self.tokenizer.decode(s)
return s
gen_out = self.inference_step(generator, [model], sample, prefix_tokens=None)
hyps, refs = [], []
for i in range(len(gen_out)):
hyps.append(decode(gen_out[i][0]["tokens"]))
refs.append(
decode(
utils.strip_pad(sample["target"][i], self.tgt_dict.pad()),
escape_unk=True, # don't count <unk> as matches to the hypo
)
)
if self.args.eval_bleu_print_samples:
logger.info("example hypothesis: " + hyps[0])
logger.info("example reference: " + refs[0])
if self.args.eval_tokenized_bleu:
return sacrebleu.corpus_bleu(hyps, [refs], tokenize="none")
else:
return sacrebleu.corpus_bleu(hyps, [refs])
|
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
args.left_pad_source = utils.eval_bool(args.left_pad_source)
args.left_pad_target = utils.eval_bool(args.left_pad_target)
paths = utils.split_paths(args.data)
assert len(paths) > 0
# find language pair automatically
if args.source_lang is None or args.target_lang is None:
args.source_lang, args.target_lang = data_utils.infer_language_pair(
paths[0]
)
if args.source_lang is None or args.target_lang is None:
raise Exception(
"Could not infer language pair, please provide it explicitly"
)
# load dictionaries
src_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(args.source_lang))
)
tgt_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(args.target_lang))
)
assert src_dict.pad() == tgt_dict.pad()
assert src_dict.eos() == tgt_dict.eos()
assert src_dict.unk() == tgt_dict.unk()
logger.info("[{}] dictionary: {} types".format(args.source_lang, len(src_dict)))
logger.info("[{}] dictionary: {} types".format(args.target_lang, len(tgt_dict)))
return cls(args, src_dict, tgt_dict)
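# A hedged end-to-end sketch of how setup_task is reached in practice; the
# data-bin path and language codes are illustrative and assume a directory
# produced by fairseq-preprocess containing dict.de.txt and dict.en.txt.
from fairseq import options, tasks

parser = options.get_training_parser()
args = options.parse_args_and_arch(
    parser,
    input_args=[
        "data-bin/iwslt14.de-en",
        "--task", "translation",
        "--arch", "transformer_iwslt_de_en",
        "--source-lang", "de",
        "--target-lang", "en",
    ],
)
task = tasks.setup_task(args)  # dispatches to TranslationTask.setup_task
print(len(task.source_dictionary), len(task.target_dictionary))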
| 243
| 278
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import json
import logging
import os
from argparse import Namespace
import numpy as np
from fairseq import metrics, options, utils
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
LanguagePairDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
data_utils,
encoders,
indexed_dataset,
)
from fairseq.tasks import LegacyFairseqTask, register_task
EVAL_BLEU_ORDER = 4
logger = logging.getLogger(__name__)
def load_langpair_dataset(
data_path,
split,
src,
src_dict,
tgt,
tgt_dict,
combine,
dataset_impl,
upsample_primary,
left_pad_source,
left_pad_target,
max_source_positions,
max_target_positions,
prepend_bos=False,
load_alignments=False,
truncate_source=False,
append_source_id=False,
num_buckets=0,
shuffle=True,
pad_to_multiple=1,
):
def split_exists(split, src, tgt, lang, data_path):
filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
src_datasets = []
tgt_datasets = []
for k in itertools.count():
split_k = split + (str(k) if k > 0 else "")
# infer langcode
if split_exists(split_k, src, tgt, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt))
elif split_exists(split_k, tgt, src, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src))
else:
if k > 0:
break
else:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, data_path)
)
src_dataset = data_utils.load_indexed_dataset(
prefix + src, src_dict, dataset_impl
)
if truncate_source:
src_dataset = AppendTokenDataset(
TruncateDataset(
StripTokenDataset(src_dataset, src_dict.eos()),
max_source_positions - 1,
),
src_dict.eos(),
)
src_datasets.append(src_dataset)
tgt_dataset = data_utils.load_indexed_dataset(
prefix + tgt, tgt_dict, dataset_impl
)
if tgt_dataset is not None:
tgt_datasets.append(tgt_dataset)
logger.info(
"{} {} {}-{} {} examples".format(
data_path, split_k, src, tgt, len(src_datasets[-1])
)
)
if not combine:
break
assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
if len(src_datasets) == 1:
src_dataset = src_datasets[0]
tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
else:
sample_ratios = [1] * len(src_datasets)
sample_ratios[0] = upsample_primary
src_dataset = ConcatDataset(src_datasets, sample_ratios)
if len(tgt_datasets) > 0:
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
else:
tgt_dataset = None
if prepend_bos:
assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
if tgt_dataset is not None:
tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
eos = None
if append_source_id:
src_dataset = AppendTokenDataset(
src_dataset, src_dict.index("[{}]".format(src))
)
if tgt_dataset is not None:
tgt_dataset = AppendTokenDataset(
tgt_dataset, tgt_dict.index("[{}]".format(tgt))
)
eos = tgt_dict.index("[{}]".format(tgt))
align_dataset = None
if load_alignments:
align_path = os.path.join(data_path, "{}.align.{}-{}".format(split, src, tgt))
if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
align_dataset = data_utils.load_indexed_dataset(
align_path, None, dataset_impl
)
tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
return LanguagePairDataset(
src_dataset,
src_dataset.sizes,
src_dict,
tgt_dataset,
tgt_dataset_sizes,
tgt_dict,
left_pad_source=left_pad_source,
left_pad_target=left_pad_target,
align_dataset=align_dataset,
eos=eos,
num_buckets=num_buckets,
shuffle=shuffle,
pad_to_multiple=pad_to_multiple,
)
@register_task("translation")
class TranslationTask(LegacyFairseqTask):
"""
Translate from one (source) language to another (target) language.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
        parser.add_argument('data', help='colon-separated list of data directories, \
                            iterated over in round-robin order across epochs; \
                            valid and test data are always taken from the first \
                            directory, so they need not be repeated in each one')
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
help='source language')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
help='target language')
parser.add_argument('--load-alignments', action='store_true',
help='load the binarized alignments')
parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
help='pad the source on the left')
parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
help='pad the target on the left')
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence')
parser.add_argument('--upsample-primary', default=1, type=int,
help='amount to upsample primary dataset')
parser.add_argument('--truncate-source', action='store_true', default=False,
help='truncate source to max-source-positions')
parser.add_argument('--num-batch-buckets', default=0, type=int, metavar='N',
help='if >0, then bucket source and target lengths into N '
'buckets and pad accordingly; this is useful on TPUs '
'to minimize the number of compilations')
# options for reporting BLEU during validation
parser.add_argument('--eval-bleu', action='store_true',
help='evaluation with BLEU scores')
parser.add_argument('--eval-bleu-detok', type=str, default="space",
help='detokenize before computing BLEU (e.g., "moses"); '
'required if using --eval-bleu; use "space" to '
'disable detokenization; see fairseq.data.encoders '
'for other options')
parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON',
help='args for building the tokenizer, if needed')
parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False,
help='compute tokenized BLEU instead of sacrebleu')
parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None,
help='remove BPE before computing BLEU')
parser.add_argument('--eval-bleu-args', type=str, metavar='JSON',
                            help='generation args for BLEU scoring, '
'e.g., \'{"beam": 4, "lenpen": 0.6}\'')
parser.add_argument('--eval-bleu-print-samples', action='store_true',
help='print sample generations during validation')
# fmt: on
def __init__(self, args, src_dict, tgt_dict):
super().__init__(args)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
args.left_pad_source = utils.eval_bool(args.left_pad_source)
args.left_pad_target = utils.eval_bool(args.left_pad_target)
paths = utils.split_paths(args.data)
assert len(paths) > 0
# find language pair automatically
if args.source_lang is None or args.target_lang is None:
args.source_lang, args.target_lang = data_utils.infer_language_pair(
paths[0]
)
if args.source_lang is None or args.target_lang is None:
raise Exception(
"Could not infer language pair, please provide it explicitly"
)
# load dictionaries
src_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(args.source_lang))
)
tgt_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(args.target_lang))
)
assert src_dict.pad() == tgt_dict.pad()
assert src_dict.eos() == tgt_dict.eos()
assert src_dict.unk() == tgt_dict.unk()
logger.info("[{}] dictionary: {} types".format(args.source_lang, len(src_dict)))
logger.info("[{}] dictionary: {} types".format(args.target_lang, len(tgt_dict)))
return cls(args, src_dict, tgt_dict)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
if split != getattr(self.args, "train_subset", None):
# if not training data set, use the first shard for valid and test
paths = paths[:1]
data_path = paths[(epoch - 1) % len(paths)]
# infer langcode
src, tgt = self.args.source_lang, self.args.target_lang
self.datasets[split] = load_langpair_dataset(
data_path,
split,
src,
self.src_dict,
tgt,
self.tgt_dict,
combine=combine,
dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions,
load_alignments=self.args.load_alignments,
truncate_source=self.args.truncate_source,
num_buckets=self.args.num_batch_buckets,
shuffle=(split != "test"),
pad_to_multiple=self.args.required_seq_len_multiple,
)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
return LanguagePairDataset(
src_tokens,
src_lengths,
self.source_dictionary,
tgt_dict=self.target_dictionary,
constraints=constraints,
)
def build_model(self, args):
model = super().build_model(args)
if getattr(args, "eval_bleu", False):
assert getattr(args, "eval_bleu_detok", None) is not None, (
"--eval-bleu-detok is required if using --eval-bleu; "
"try --eval-bleu-detok=moses (or --eval-bleu-detok=space "
"to disable detokenization, e.g., when using sentencepiece)"
)
detok_args = json.loads(getattr(args, "eval_bleu_detok_args", "{}") or "{}")
self.tokenizer = encoders.build_tokenizer(
Namespace(
tokenizer=getattr(args, "eval_bleu_detok", None), **detok_args
)
)
gen_args = json.loads(getattr(args, "eval_bleu_args", "{}") or "{}")
self.sequence_generator = self.build_generator(
[model], Namespace(**gen_args)
)
return model
def valid_step(self, sample, model, criterion):
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
if self.args.eval_bleu:
bleu = self._inference_with_bleu(self.sequence_generator, sample, model)
logging_output["_bleu_sys_len"] = bleu.sys_len
logging_output["_bleu_ref_len"] = bleu.ref_len
# we split counts into separate entries so that they can be
# summed efficiently across workers using fast-stat-sync
assert len(bleu.counts) == EVAL_BLEU_ORDER
for i in range(EVAL_BLEU_ORDER):
logging_output["_bleu_counts_" + str(i)] = bleu.counts[i]
logging_output["_bleu_totals_" + str(i)] = bleu.totals[i]
return loss, sample_size, logging_output
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
if self.args.eval_bleu:
def sum_logs(key):
return sum(log.get(key, 0) for log in logging_outputs)
counts, totals = [], []
for i in range(EVAL_BLEU_ORDER):
counts.append(sum_logs("_bleu_counts_" + str(i)))
totals.append(sum_logs("_bleu_totals_" + str(i)))
if max(totals) > 0:
# log counts as numpy arrays -- log_scalar will sum them correctly
metrics.log_scalar("_bleu_counts", np.array(counts))
metrics.log_scalar("_bleu_totals", np.array(totals))
metrics.log_scalar("_bleu_sys_len", sum_logs("_bleu_sys_len"))
metrics.log_scalar("_bleu_ref_len", sum_logs("_bleu_ref_len"))
def compute_bleu(meters):
import inspect
import sacrebleu
fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0]
if "smooth_method" in fn_sig:
smooth = {"smooth_method": "exp"}
else:
smooth = {"smooth": "exp"}
bleu = sacrebleu.compute_bleu(
correct=meters["_bleu_counts"].sum,
total=meters["_bleu_totals"].sum,
sys_len=meters["_bleu_sys_len"].sum,
ref_len=meters["_bleu_ref_len"].sum,
**smooth
)
return round(bleu.score, 2)
metrics.log_derived("bleu", compute_bleu)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary`."""
return self.src_dict
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary`."""
return self.tgt_dict
def _inference_with_bleu(self, generator, sample, model):
import sacrebleu
def decode(toks, escape_unk=False):
s = self.tgt_dict.string(
toks.int().cpu(),
self.args.eval_bleu_remove_bpe,
# The default unknown string in fairseq is `<unk>`, but
# this is tokenized by sacrebleu as `< unk >`, inflating
# BLEU scores. Instead, we use a somewhat more verbose
# alternative that is unlikely to appear in the real
# reference, but doesn't get split into multiple tokens.
unk_string=("UNKNOWNTOKENINREF" if escape_unk else "UNKNOWNTOKENINHYP"),
)
if self.tokenizer:
s = self.tokenizer.decode(s)
return s
gen_out = self.inference_step(generator, [model], sample, prefix_tokens=None)
hyps, refs = [], []
for i in range(len(gen_out)):
hyps.append(decode(gen_out[i][0]["tokens"]))
refs.append(
decode(
utils.strip_pad(sample["target"][i], self.tgt_dict.pad()),
escape_unk=True, # don't count <unk> as matches to the hypo
)
)
if self.args.eval_bleu_print_samples:
logger.info("example hypothesis: " + hyps[0])
logger.info("example reference: " + refs[0])
if self.args.eval_tokenized_bleu:
return sacrebleu.corpus_bleu(hyps, [refs], tokenize="none")
else:
return sacrebleu.corpus_bleu(hyps, [refs])
|
load_dataset
|
Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import json
import logging
import os
from argparse import Namespace
import numpy as np
from fairseq import metrics, options, utils
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
LanguagePairDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
data_utils,
encoders,
indexed_dataset,
)
from fairseq.tasks import LegacyFairseqTask, register_task
EVAL_BLEU_ORDER = 4
logger = logging.getLogger(__name__)
def load_langpair_dataset(
data_path,
split,
src,
src_dict,
tgt,
tgt_dict,
combine,
dataset_impl,
upsample_primary,
left_pad_source,
left_pad_target,
max_source_positions,
max_target_positions,
prepend_bos=False,
load_alignments=False,
truncate_source=False,
append_source_id=False,
num_buckets=0,
shuffle=True,
pad_to_multiple=1,
):
def split_exists(split, src, tgt, lang, data_path):
filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
src_datasets = []
tgt_datasets = []
for k in itertools.count():
split_k = split + (str(k) if k > 0 else "")
# infer langcode
if split_exists(split_k, src, tgt, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt))
elif split_exists(split_k, tgt, src, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src))
else:
if k > 0:
break
else:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, data_path)
)
src_dataset = data_utils.load_indexed_dataset(
prefix + src, src_dict, dataset_impl
)
if truncate_source:
src_dataset = AppendTokenDataset(
TruncateDataset(
StripTokenDataset(src_dataset, src_dict.eos()),
max_source_positions - 1,
),
src_dict.eos(),
)
src_datasets.append(src_dataset)
tgt_dataset = data_utils.load_indexed_dataset(
prefix + tgt, tgt_dict, dataset_impl
)
if tgt_dataset is not None:
tgt_datasets.append(tgt_dataset)
logger.info(
"{} {} {}-{} {} examples".format(
data_path, split_k, src, tgt, len(src_datasets[-1])
)
)
if not combine:
break
assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
if len(src_datasets) == 1:
src_dataset = src_datasets[0]
tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
else:
sample_ratios = [1] * len(src_datasets)
sample_ratios[0] = upsample_primary
src_dataset = ConcatDataset(src_datasets, sample_ratios)
if len(tgt_datasets) > 0:
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
else:
tgt_dataset = None
if prepend_bos:
assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
if tgt_dataset is not None:
tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
eos = None
if append_source_id:
src_dataset = AppendTokenDataset(
src_dataset, src_dict.index("[{}]".format(src))
)
if tgt_dataset is not None:
tgt_dataset = AppendTokenDataset(
tgt_dataset, tgt_dict.index("[{}]".format(tgt))
)
eos = tgt_dict.index("[{}]".format(tgt))
align_dataset = None
if load_alignments:
align_path = os.path.join(data_path, "{}.align.{}-{}".format(split, src, tgt))
if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
align_dataset = data_utils.load_indexed_dataset(
align_path, None, dataset_impl
)
tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
return LanguagePairDataset(
src_dataset,
src_dataset.sizes,
src_dict,
tgt_dataset,
tgt_dataset_sizes,
tgt_dict,
left_pad_source=left_pad_source,
left_pad_target=left_pad_target,
align_dataset=align_dataset,
eos=eos,
num_buckets=num_buckets,
shuffle=shuffle,
pad_to_multiple=pad_to_multiple,
)
@register_task("translation")
class TranslationTask(LegacyFairseqTask):
"""
Translate from one (source) language to another (target) language.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
        parser.add_argument('data', help='colon-separated list of data directories, \
                            iterated over in round-robin order across epochs; \
                            valid and test data are always taken from the first \
                            directory, so they need not be repeated in each one')
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
help='source language')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
help='target language')
parser.add_argument('--load-alignments', action='store_true',
help='load the binarized alignments')
parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
help='pad the source on the left')
parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
help='pad the target on the left')
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence')
parser.add_argument('--upsample-primary', default=1, type=int,
help='amount to upsample primary dataset')
parser.add_argument('--truncate-source', action='store_true', default=False,
help='truncate source to max-source-positions')
parser.add_argument('--num-batch-buckets', default=0, type=int, metavar='N',
help='if >0, then bucket source and target lengths into N '
'buckets and pad accordingly; this is useful on TPUs '
'to minimize the number of compilations')
# options for reporting BLEU during validation
parser.add_argument('--eval-bleu', action='store_true',
help='evaluation with BLEU scores')
parser.add_argument('--eval-bleu-detok', type=str, default="space",
help='detokenize before computing BLEU (e.g., "moses"); '
'required if using --eval-bleu; use "space" to '
'disable detokenization; see fairseq.data.encoders '
'for other options')
parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON',
help='args for building the tokenizer, if needed')
parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False,
help='compute tokenized BLEU instead of sacrebleu')
parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None,
help='remove BPE before computing BLEU')
parser.add_argument('--eval-bleu-args', type=str, metavar='JSON',
                            help='generation args for BLEU scoring, '
'e.g., \'{"beam": 4, "lenpen": 0.6}\'')
parser.add_argument('--eval-bleu-print-samples', action='store_true',
help='print sample generations during validation')
# fmt: on
def __init__(self, args, src_dict, tgt_dict):
super().__init__(args)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
args.left_pad_source = utils.eval_bool(args.left_pad_source)
args.left_pad_target = utils.eval_bool(args.left_pad_target)
paths = utils.split_paths(args.data)
assert len(paths) > 0
# find language pair automatically
if args.source_lang is None or args.target_lang is None:
args.source_lang, args.target_lang = data_utils.infer_language_pair(
paths[0]
)
if args.source_lang is None or args.target_lang is None:
raise Exception(
"Could not infer language pair, please provide it explicitly"
)
# load dictionaries
src_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(args.source_lang))
)
tgt_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(args.target_lang))
)
assert src_dict.pad() == tgt_dict.pad()
assert src_dict.eos() == tgt_dict.eos()
assert src_dict.unk() == tgt_dict.unk()
logger.info("[{}] dictionary: {} types".format(args.source_lang, len(src_dict)))
logger.info("[{}] dictionary: {} types".format(args.target_lang, len(tgt_dict)))
return cls(args, src_dict, tgt_dict)
# MASKED: load_dataset function (lines 280-315)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
return LanguagePairDataset(
src_tokens,
src_lengths,
self.source_dictionary,
tgt_dict=self.target_dictionary,
constraints=constraints,
)
def build_model(self, args):
model = super().build_model(args)
if getattr(args, "eval_bleu", False):
assert getattr(args, "eval_bleu_detok", None) is not None, (
"--eval-bleu-detok is required if using --eval-bleu; "
"try --eval-bleu-detok=moses (or --eval-bleu-detok=space "
"to disable detokenization, e.g., when using sentencepiece)"
)
detok_args = json.loads(getattr(args, "eval_bleu_detok_args", "{}") or "{}")
self.tokenizer = encoders.build_tokenizer(
Namespace(
tokenizer=getattr(args, "eval_bleu_detok", None), **detok_args
)
)
gen_args = json.loads(getattr(args, "eval_bleu_args", "{}") or "{}")
self.sequence_generator = self.build_generator(
[model], Namespace(**gen_args)
)
return model
def valid_step(self, sample, model, criterion):
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
if self.args.eval_bleu:
bleu = self._inference_with_bleu(self.sequence_generator, sample, model)
logging_output["_bleu_sys_len"] = bleu.sys_len
logging_output["_bleu_ref_len"] = bleu.ref_len
# we split counts into separate entries so that they can be
# summed efficiently across workers using fast-stat-sync
assert len(bleu.counts) == EVAL_BLEU_ORDER
for i in range(EVAL_BLEU_ORDER):
logging_output["_bleu_counts_" + str(i)] = bleu.counts[i]
logging_output["_bleu_totals_" + str(i)] = bleu.totals[i]
return loss, sample_size, logging_output
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
if self.args.eval_bleu:
def sum_logs(key):
return sum(log.get(key, 0) for log in logging_outputs)
counts, totals = [], []
for i in range(EVAL_BLEU_ORDER):
counts.append(sum_logs("_bleu_counts_" + str(i)))
totals.append(sum_logs("_bleu_totals_" + str(i)))
if max(totals) > 0:
# log counts as numpy arrays -- log_scalar will sum them correctly
metrics.log_scalar("_bleu_counts", np.array(counts))
metrics.log_scalar("_bleu_totals", np.array(totals))
metrics.log_scalar("_bleu_sys_len", sum_logs("_bleu_sys_len"))
metrics.log_scalar("_bleu_ref_len", sum_logs("_bleu_ref_len"))
def compute_bleu(meters):
import inspect
import sacrebleu
fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0]
if "smooth_method" in fn_sig:
smooth = {"smooth_method": "exp"}
else:
smooth = {"smooth": "exp"}
bleu = sacrebleu.compute_bleu(
correct=meters["_bleu_counts"].sum,
total=meters["_bleu_totals"].sum,
sys_len=meters["_bleu_sys_len"].sum,
ref_len=meters["_bleu_ref_len"].sum,
**smooth
)
return round(bleu.score, 2)
metrics.log_derived("bleu", compute_bleu)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary`."""
return self.src_dict
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary`."""
return self.tgt_dict
def _inference_with_bleu(self, generator, sample, model):
import sacrebleu
def decode(toks, escape_unk=False):
s = self.tgt_dict.string(
toks.int().cpu(),
self.args.eval_bleu_remove_bpe,
# The default unknown string in fairseq is `<unk>`, but
# this is tokenized by sacrebleu as `< unk >`, inflating
# BLEU scores. Instead, we use a somewhat more verbose
# alternative that is unlikely to appear in the real
# reference, but doesn't get split into multiple tokens.
unk_string=("UNKNOWNTOKENINREF" if escape_unk else "UNKNOWNTOKENINHYP"),
)
if self.tokenizer:
s = self.tokenizer.decode(s)
return s
gen_out = self.inference_step(generator, [model], sample, prefix_tokens=None)
hyps, refs = [], []
for i in range(len(gen_out)):
hyps.append(decode(gen_out[i][0]["tokens"]))
refs.append(
decode(
utils.strip_pad(sample["target"][i], self.tgt_dict.pad()),
escape_unk=True, # don't count <unk> as matches to the hypo
)
)
if self.args.eval_bleu_print_samples:
logger.info("example hypothesis: " + hyps[0])
logger.info("example reference: " + refs[0])
if self.args.eval_tokenized_bleu:
return sacrebleu.corpus_bleu(hyps, [refs], tokenize="none")
else:
return sacrebleu.corpus_bleu(hyps, [refs])
|
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
if split != getattr(self.args, "train_subset", None):
# if not training data set, use the first shard for valid and test
paths = paths[:1]
data_path = paths[(epoch - 1) % len(paths)]
# infer langcode
src, tgt = self.args.source_lang, self.args.target_lang
self.datasets[split] = load_langpair_dataset(
data_path,
split,
src,
self.src_dict,
tgt,
self.tgt_dict,
combine=combine,
dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions,
load_alignments=self.args.load_alignments,
truncate_source=self.args.truncate_source,
num_buckets=self.args.num_batch_buckets,
shuffle=(split != "test"),
pad_to_multiple=self.args.required_seq_len_multiple,
)
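# Standalone illustration of the shard rotation above: with a
# colon-separated data path, training epochs cycle through the shards in
# round-robin order (paths below are hypothetical).
paths = ["data-bin/shard0", "data-bin/shard1", "data-bin/shard2"]
for epoch in range(1, 7):
    print(epoch, paths[(epoch - 1) % len(paths)])
# -> 1 shard0, 2 shard1, 3 shard2, 4 shard0, 5 shard1, 6 shard2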
| 280
| 315
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import json
import logging
import os
from argparse import Namespace
import numpy as np
from fairseq import metrics, options, utils
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
LanguagePairDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
data_utils,
encoders,
indexed_dataset,
)
from fairseq.tasks import LegacyFairseqTask, register_task
EVAL_BLEU_ORDER = 4
logger = logging.getLogger(__name__)
def load_langpair_dataset(
data_path,
split,
src,
src_dict,
tgt,
tgt_dict,
combine,
dataset_impl,
upsample_primary,
left_pad_source,
left_pad_target,
max_source_positions,
max_target_positions,
prepend_bos=False,
load_alignments=False,
truncate_source=False,
append_source_id=False,
num_buckets=0,
shuffle=True,
pad_to_multiple=1,
):
def split_exists(split, src, tgt, lang, data_path):
filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
src_datasets = []
tgt_datasets = []
for k in itertools.count():
split_k = split + (str(k) if k > 0 else "")
# infer langcode
if split_exists(split_k, src, tgt, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt))
elif split_exists(split_k, tgt, src, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src))
else:
if k > 0:
break
else:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, data_path)
)
src_dataset = data_utils.load_indexed_dataset(
prefix + src, src_dict, dataset_impl
)
if truncate_source:
src_dataset = AppendTokenDataset(
TruncateDataset(
StripTokenDataset(src_dataset, src_dict.eos()),
max_source_positions - 1,
),
src_dict.eos(),
)
src_datasets.append(src_dataset)
tgt_dataset = data_utils.load_indexed_dataset(
prefix + tgt, tgt_dict, dataset_impl
)
if tgt_dataset is not None:
tgt_datasets.append(tgt_dataset)
logger.info(
"{} {} {}-{} {} examples".format(
data_path, split_k, src, tgt, len(src_datasets[-1])
)
)
if not combine:
break
assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
if len(src_datasets) == 1:
src_dataset = src_datasets[0]
tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
else:
sample_ratios = [1] * len(src_datasets)
sample_ratios[0] = upsample_primary
src_dataset = ConcatDataset(src_datasets, sample_ratios)
if len(tgt_datasets) > 0:
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
else:
tgt_dataset = None
if prepend_bos:
assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
if tgt_dataset is not None:
tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
eos = None
if append_source_id:
src_dataset = AppendTokenDataset(
src_dataset, src_dict.index("[{}]".format(src))
)
if tgt_dataset is not None:
tgt_dataset = AppendTokenDataset(
tgt_dataset, tgt_dict.index("[{}]".format(tgt))
)
eos = tgt_dict.index("[{}]".format(tgt))
align_dataset = None
if load_alignments:
align_path = os.path.join(data_path, "{}.align.{}-{}".format(split, src, tgt))
if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
align_dataset = data_utils.load_indexed_dataset(
align_path, None, dataset_impl
)
tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
return LanguagePairDataset(
src_dataset,
src_dataset.sizes,
src_dict,
tgt_dataset,
tgt_dataset_sizes,
tgt_dict,
left_pad_source=left_pad_source,
left_pad_target=left_pad_target,
align_dataset=align_dataset,
eos=eos,
num_buckets=num_buckets,
shuffle=shuffle,
pad_to_multiple=pad_to_multiple,
)
@register_task("translation")
class TranslationTask(LegacyFairseqTask):
"""
Translate from one (source) language to another (target) language.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
        parser.add_argument('data', help='colon-separated list of data directories, \
                            iterated over in round-robin order across epochs; \
                            valid and test data are always taken from the first \
                            directory, so they need not be repeated in each one')
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
help='source language')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
help='target language')
parser.add_argument('--load-alignments', action='store_true',
help='load the binarized alignments')
parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
help='pad the source on the left')
parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
help='pad the target on the left')
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence')
parser.add_argument('--upsample-primary', default=1, type=int,
help='amount to upsample primary dataset')
parser.add_argument('--truncate-source', action='store_true', default=False,
help='truncate source to max-source-positions')
parser.add_argument('--num-batch-buckets', default=0, type=int, metavar='N',
help='if >0, then bucket source and target lengths into N '
'buckets and pad accordingly; this is useful on TPUs '
'to minimize the number of compilations')
# options for reporting BLEU during validation
parser.add_argument('--eval-bleu', action='store_true',
help='evaluation with BLEU scores')
parser.add_argument('--eval-bleu-detok', type=str, default="space",
help='detokenize before computing BLEU (e.g., "moses"); '
'required if using --eval-bleu; use "space" to '
'disable detokenization; see fairseq.data.encoders '
'for other options')
parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON',
help='args for building the tokenizer, if needed')
parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False,
help='compute tokenized BLEU instead of sacrebleu')
parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None,
help='remove BPE before computing BLEU')
parser.add_argument('--eval-bleu-args', type=str, metavar='JSON',
                            help='generation args for BLEU scoring, '
'e.g., \'{"beam": 4, "lenpen": 0.6}\'')
parser.add_argument('--eval-bleu-print-samples', action='store_true',
help='print sample generations during validation')
# fmt: on
def __init__(self, args, src_dict, tgt_dict):
super().__init__(args)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
args.left_pad_source = utils.eval_bool(args.left_pad_source)
args.left_pad_target = utils.eval_bool(args.left_pad_target)
paths = utils.split_paths(args.data)
assert len(paths) > 0
# find language pair automatically
if args.source_lang is None or args.target_lang is None:
args.source_lang, args.target_lang = data_utils.infer_language_pair(
paths[0]
)
if args.source_lang is None or args.target_lang is None:
raise Exception(
"Could not infer language pair, please provide it explicitly"
)
# load dictionaries
src_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(args.source_lang))
)
tgt_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(args.target_lang))
)
assert src_dict.pad() == tgt_dict.pad()
assert src_dict.eos() == tgt_dict.eos()
assert src_dict.unk() == tgt_dict.unk()
logger.info("[{}] dictionary: {} types".format(args.source_lang, len(src_dict)))
logger.info("[{}] dictionary: {} types".format(args.target_lang, len(tgt_dict)))
return cls(args, src_dict, tgt_dict)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
if split != getattr(self.args, "train_subset", None):
# if not training data set, use the first shard for valid and test
paths = paths[:1]
data_path = paths[(epoch - 1) % len(paths)]
# infer langcode
src, tgt = self.args.source_lang, self.args.target_lang
self.datasets[split] = load_langpair_dataset(
data_path,
split,
src,
self.src_dict,
tgt,
self.tgt_dict,
combine=combine,
dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions,
load_alignments=self.args.load_alignments,
truncate_source=self.args.truncate_source,
num_buckets=self.args.num_batch_buckets,
shuffle=(split != "test"),
pad_to_multiple=self.args.required_seq_len_multiple,
)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
return LanguagePairDataset(
src_tokens,
src_lengths,
self.source_dictionary,
tgt_dict=self.target_dictionary,
constraints=constraints,
)
def build_model(self, args):
model = super().build_model(args)
if getattr(args, "eval_bleu", False):
assert getattr(args, "eval_bleu_detok", None) is not None, (
"--eval-bleu-detok is required if using --eval-bleu; "
"try --eval-bleu-detok=moses (or --eval-bleu-detok=space "
"to disable detokenization, e.g., when using sentencepiece)"
)
detok_args = json.loads(getattr(args, "eval_bleu_detok_args", "{}") or "{}")
self.tokenizer = encoders.build_tokenizer(
Namespace(
tokenizer=getattr(args, "eval_bleu_detok", None), **detok_args
)
)
gen_args = json.loads(getattr(args, "eval_bleu_args", "{}") or "{}")
self.sequence_generator = self.build_generator(
[model], Namespace(**gen_args)
)
return model
def valid_step(self, sample, model, criterion):
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
if self.args.eval_bleu:
bleu = self._inference_with_bleu(self.sequence_generator, sample, model)
logging_output["_bleu_sys_len"] = bleu.sys_len
logging_output["_bleu_ref_len"] = bleu.ref_len
# we split counts into separate entries so that they can be
# summed efficiently across workers using fast-stat-sync
assert len(bleu.counts) == EVAL_BLEU_ORDER
for i in range(EVAL_BLEU_ORDER):
logging_output["_bleu_counts_" + str(i)] = bleu.counts[i]
logging_output["_bleu_totals_" + str(i)] = bleu.totals[i]
return loss, sample_size, logging_output
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
if self.args.eval_bleu:
def sum_logs(key):
return sum(log.get(key, 0) for log in logging_outputs)
counts, totals = [], []
for i in range(EVAL_BLEU_ORDER):
counts.append(sum_logs("_bleu_counts_" + str(i)))
totals.append(sum_logs("_bleu_totals_" + str(i)))
if max(totals) > 0:
# log counts as numpy arrays -- log_scalar will sum them correctly
metrics.log_scalar("_bleu_counts", np.array(counts))
metrics.log_scalar("_bleu_totals", np.array(totals))
metrics.log_scalar("_bleu_sys_len", sum_logs("_bleu_sys_len"))
metrics.log_scalar("_bleu_ref_len", sum_logs("_bleu_ref_len"))
def compute_bleu(meters):
import inspect
import sacrebleu
fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0]
if "smooth_method" in fn_sig:
smooth = {"smooth_method": "exp"}
else:
smooth = {"smooth": "exp"}
bleu = sacrebleu.compute_bleu(
correct=meters["_bleu_counts"].sum,
total=meters["_bleu_totals"].sum,
sys_len=meters["_bleu_sys_len"].sum,
ref_len=meters["_bleu_ref_len"].sum,
**smooth
)
return round(bleu.score, 2)
metrics.log_derived("bleu", compute_bleu)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary`."""
return self.src_dict
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary`."""
return self.tgt_dict
def _inference_with_bleu(self, generator, sample, model):
import sacrebleu
def decode(toks, escape_unk=False):
s = self.tgt_dict.string(
toks.int().cpu(),
self.args.eval_bleu_remove_bpe,
# The default unknown string in fairseq is `<unk>`, but
# this is tokenized by sacrebleu as `< unk >`, inflating
# BLEU scores. Instead, we use a somewhat more verbose
# alternative that is unlikely to appear in the real
# reference, but doesn't get split into multiple tokens.
unk_string=("UNKNOWNTOKENINREF" if escape_unk else "UNKNOWNTOKENINHYP"),
)
if self.tokenizer:
s = self.tokenizer.decode(s)
return s
gen_out = self.inference_step(generator, [model], sample, prefix_tokens=None)
hyps, refs = [], []
for i in range(len(gen_out)):
hyps.append(decode(gen_out[i][0]["tokens"]))
refs.append(
decode(
utils.strip_pad(sample["target"][i], self.tgt_dict.pad()),
escape_unk=True, # don't count <unk> as matches to the hypo
)
)
if self.args.eval_bleu_print_samples:
logger.info("example hypothesis: " + hyps[0])
logger.info("example reference: " + refs[0])
if self.args.eval_tokenized_bleu:
return sacrebleu.corpus_bleu(hyps, [refs], tokenize="none")
else:
return sacrebleu.corpus_bleu(hyps, [refs])
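# Quick sanity check mirroring the sacrebleu call in _inference_with_bleu
# (toy sentences; assumes sacrebleu is installed).
if __name__ == "__main__":
    import sacrebleu

    hyps = ["the cat sat on the mat"]
    refs = ["the cat sat on a mat"]
    print(round(sacrebleu.corpus_bleu(hyps, [refs]).score, 2))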
|
prepare_transforms
|
Prepares pre-defined train and test transformation pipelines for some datasets.
Args:
dataset (str): dataset name.
Returns:
Tuple[nn.Module, nn.Module]: training and validation transformation pipelines.
|
import os
from pathlib import Path
from typing import Callable, Optional, Tuple, Union
import torchvision
from torch import nn
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from torchvision.datasets import STL10, ImageFolder
def build_custom_pipeline():
"""Builds augmentation pipelines for custom data.
    If you want to do exotic augmentations, you can just rewrite this function.
Needs to return a dict with the same structure.
"""
pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=224, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
"T_val": transforms.Compose(
[
transforms.Resize(256), # resize shorter
transforms.CenterCrop(224), # take center crop
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
}
return pipeline
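# Hypothetical helper (not part of the library): a quick smoke test for
# build_custom_pipeline using a random image, checking output shapes only.
def _demo_custom_pipeline():
    from PIL import Image
    import numpy as np

    pipe = build_custom_pipeline()
    img = Image.fromarray(np.uint8(np.random.rand(300, 400, 3) * 255))
    # Train: RandomResizedCrop to 224; val: resize shorter side to 256 then
    # center-crop to 224. Both should yield 3x224x224 tensors.
    print(pipe["T_train"](img).shape, pipe["T_val"](img).shape)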
# MASKED: prepare_transforms function (lines 39-120)
def prepare_datasets(
dataset: str,
T_train: Callable,
T_val: Callable,
data_dir: Optional[Union[str, Path]] = None,
train_dir: Optional[Union[str, Path]] = None,
val_dir: Optional[Union[str, Path]] = None,
) -> Tuple[Dataset, Dataset]:
"""Prepares train and val datasets.
Args:
dataset (str): dataset name.
T_train (Callable): pipeline of transformations for training dataset.
T_val (Callable): pipeline of transformations for validation dataset.
        data_dir (Optional[Union[str, Path]]): path where to download/locate the dataset.
        train_dir (Optional[Union[str, Path]]): subpath where the training data is located.
        val_dir (Optional[Union[str, Path]]): subpath where the validation data is located.
Returns:
Tuple[Dataset, Dataset]: training dataset and validation dataset.
"""
if data_dir is None:
sandbox_dir = Path(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
data_dir = sandbox_dir / "datasets"
else:
data_dir = Path(data_dir)
if train_dir is None:
train_dir = Path(f"{dataset}/train")
else:
train_dir = Path(train_dir)
if val_dir is None:
val_dir = Path(f"{dataset}/val")
else:
val_dir = Path(val_dir)
assert dataset in ["cifar10", "cifar100", "stl10", "imagenet", "imagenet100", "custom"]
if dataset in ["cifar10", "cifar100"]:
DatasetClass = vars(torchvision.datasets)[dataset.upper()]
train_dataset = DatasetClass(
data_dir / train_dir,
train=True,
download=True,
transform=T_train,
)
val_dataset = DatasetClass(
data_dir / val_dir,
train=False,
download=True,
transform=T_val,
)
elif dataset == "stl10":
train_dataset = STL10(
data_dir / train_dir,
split="train",
download=True,
transform=T_train,
)
val_dataset = STL10(
data_dir / val_dir,
split="test",
download=True,
transform=T_val,
)
elif dataset in ["imagenet", "imagenet100", "custom"]:
train_dir = data_dir / train_dir
val_dir = data_dir / val_dir
train_dataset = ImageFolder(train_dir, T_train)
val_dataset = ImageFolder(val_dir, T_val)
return train_dataset, val_dataset
def prepare_dataloaders(
train_dataset: Dataset, val_dataset: Dataset, batch_size: int = 64, num_workers: int = 4
) -> Tuple[DataLoader, DataLoader]:
"""Wraps a train and a validation dataset with a DataLoader.
Args:
train_dataset (Dataset): object containing training data.
val_dataset (Dataset): object containing validation data.
batch_size (int): batch size.
num_workers (int): number of parallel workers.
Returns:
Tuple[DataLoader, DataLoader]: training dataloader and validation dataloader.
"""
train_loader = DataLoader(
train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
pin_memory=True,
drop_last=True,
)
val_loader = DataLoader(
val_dataset,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=True,
drop_last=False,
)
return train_loader, val_loader
def prepare_data(
dataset: str,
transform: Optional[Callable] = None,
data_dir: Optional[Union[str, Path]] = None,
train_dir: Optional[Union[str, Path]] = None,
val_dir: Optional[Union[str, Path]] = None,
batch_size: int = 64,
num_workers: int = 4,
) -> Tuple[DataLoader, DataLoader]:
"""Prepares transformations, creates dataset objects and wraps them in dataloaders.
Args:
        dataset (str): dataset name.
        transform (Optional[Callable], optional): transformation pipeline applied
            to both the training and validation data; if None, the dataset-specific
            defaults from prepare_transforms are used. Defaults to None.
data_dir (Optional[Union[str, Path]], optional): path where to download/locate the dataset.
Defaults to None.
train_dir (Optional[Union[str, Path]], optional): subpath where the
training data is located. Defaults to None.
val_dir (Optional[Union[str, Path]], optional): subpath where the
validation data is located. Defaults to None.
batch_size (int, optional): batch size. Defaults to 64.
num_workers (int, optional): number of parallel workers. Defaults to 4.
Returns:
        Tuple[DataLoader, DataLoader]: prepared training and validation dataloaders.
"""
if transform is None:
T_train, T_val = prepare_transforms(dataset)
else:
T_train = transform
T_val = transform
train_dataset, val_dataset = prepare_datasets(
dataset,
T_train,
T_val,
data_dir=data_dir,
train_dir=train_dir,
val_dir=val_dir,
)
train_loader, val_loader = prepare_dataloaders(
train_dataset,
val_dataset,
batch_size=batch_size,
num_workers=num_workers,
)
return train_loader, val_loader
|
def prepare_transforms(dataset: str) -> Tuple[nn.Module, nn.Module]:
"""Prepares pre-defined train and test transformation pipelines for some datasets.
Args:
dataset (str): dataset name.
Returns:
Tuple[nn.Module, nn.Module]: training and validation transformation pipelines.
"""
cifar_pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=32, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),
]
),
"T_val": transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),
]
),
}
stl_pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=96, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4823, 0.4466), (0.247, 0.243, 0.261)),
]
),
"T_val": transforms.Compose(
[
transforms.Resize((96, 96)),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4823, 0.4466), (0.247, 0.243, 0.261)),
]
),
}
imagenet_pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=224, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
"T_val": transforms.Compose(
[
transforms.Resize(256), # resize shorter
transforms.CenterCrop(224), # take center crop
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
}
custom_pipeline = build_custom_pipeline()
pipelines = {
"cifar10": cifar_pipeline,
"cifar100": cifar_pipeline,
"stl10": stl_pipeline,
"imagenet100": imagenet_pipeline,
"imagenet": imagenet_pipeline,
"custom": custom_pipeline,
}
assert dataset in pipelines
pipeline = pipelines[dataset]
T_train = pipeline["T_train"]
T_val = pipeline["T_val"]
return T_train, T_val
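# Usage sketch (hedged): grab the default CIFAR-10 pipelines and run the
# training transform on a dummy PIL image; the resulting tensor is
# channel-first and normalized. PIL is available as a torchvision dependency.
if __name__ == "__main__":
    from PIL import Image
    T_train, T_val = prepare_transforms("cifar10")
    img = Image.new("RGB", (32, 32))  # placeholder image for illustration
    x = T_train(img)
    print(x.shape)  # torch.Size([3, 32, 32])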
| 39
| 120
|
import os
from pathlib import Path
from typing import Callable, Optional, Tuple, Union
import torchvision
from torch import nn
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from torchvision.datasets import STL10, ImageFolder
def build_custom_pipeline():
"""Builds augmentation pipelines for custom data.
    If you want to do esoteric augmentations, you can just rewrite this function.
Needs to return a dict with the same structure.
"""
pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=224, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
"T_val": transforms.Compose(
[
transforms.Resize(256), # resize shorter
transforms.CenterCrop(224), # take center crop
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
}
return pipeline
def prepare_transforms(dataset: str) -> Tuple[nn.Module, nn.Module]:
"""Prepares pre-defined train and test transformation pipelines for some datasets.
Args:
dataset (str): dataset name.
Returns:
Tuple[nn.Module, nn.Module]: training and validation transformation pipelines.
"""
cifar_pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=32, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),
]
),
"T_val": transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),
]
),
}
stl_pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=96, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4823, 0.4466), (0.247, 0.243, 0.261)),
]
),
"T_val": transforms.Compose(
[
transforms.Resize((96, 96)),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4823, 0.4466), (0.247, 0.243, 0.261)),
]
),
}
imagenet_pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=224, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
"T_val": transforms.Compose(
[
transforms.Resize(256), # resize shorter
transforms.CenterCrop(224), # take center crop
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
}
custom_pipeline = build_custom_pipeline()
pipelines = {
"cifar10": cifar_pipeline,
"cifar100": cifar_pipeline,
"stl10": stl_pipeline,
"imagenet100": imagenet_pipeline,
"imagenet": imagenet_pipeline,
"custom": custom_pipeline,
}
assert dataset in pipelines
pipeline = pipelines[dataset]
T_train = pipeline["T_train"]
T_val = pipeline["T_val"]
return T_train, T_val
def prepare_datasets(
dataset: str,
T_train: Callable,
T_val: Callable,
data_dir: Optional[Union[str, Path]] = None,
train_dir: Optional[Union[str, Path]] = None,
val_dir: Optional[Union[str, Path]] = None,
) -> Tuple[Dataset, Dataset]:
"""Prepares train and val datasets.
Args:
dataset (str): dataset name.
T_train (Callable): pipeline of transformations for training dataset.
T_val (Callable): pipeline of transformations for validation dataset.
        data_dir (Optional[Union[str, Path]]): path where to download/locate the dataset.
        train_dir (Optional[Union[str, Path]]): subpath where the training data is located.
        val_dir (Optional[Union[str, Path]]): subpath where the validation data is located.
Returns:
Tuple[Dataset, Dataset]: training dataset and validation dataset.
"""
if data_dir is None:
sandbox_dir = Path(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
data_dir = sandbox_dir / "datasets"
else:
data_dir = Path(data_dir)
if train_dir is None:
train_dir = Path(f"{dataset}/train")
else:
train_dir = Path(train_dir)
if val_dir is None:
val_dir = Path(f"{dataset}/val")
else:
val_dir = Path(val_dir)
assert dataset in ["cifar10", "cifar100", "stl10", "imagenet", "imagenet100", "custom"]
if dataset in ["cifar10", "cifar100"]:
DatasetClass = vars(torchvision.datasets)[dataset.upper()]
train_dataset = DatasetClass(
data_dir / train_dir,
train=True,
download=True,
transform=T_train,
)
val_dataset = DatasetClass(
data_dir / val_dir,
train=False,
download=True,
transform=T_val,
)
elif dataset == "stl10":
train_dataset = STL10(
data_dir / train_dir,
split="train",
download=True,
transform=T_train,
)
val_dataset = STL10(
data_dir / val_dir,
split="test",
download=True,
transform=T_val,
)
elif dataset in ["imagenet", "imagenet100", "custom"]:
train_dir = data_dir / train_dir
val_dir = data_dir / val_dir
train_dataset = ImageFolder(train_dir, T_train)
val_dataset = ImageFolder(val_dir, T_val)
return train_dataset, val_dataset
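# Note on the ImageFolder branch above: for "imagenet", "imagenet100" and
# "custom", the data is expected on disk as (paths hypothetical):
#   <data_dir>/<dataset>/train/<class_name>/xxx.jpg
#   <data_dir>/<dataset>/val/<class_name>/yyy.jpg
# i.e. one subdirectory per class, which is the layout ImageFolder consumes.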
def prepare_dataloaders(
train_dataset: Dataset, val_dataset: Dataset, batch_size: int = 64, num_workers: int = 4
) -> Tuple[DataLoader, DataLoader]:
"""Wraps a train and a validation dataset with a DataLoader.
Args:
train_dataset (Dataset): object containing training data.
val_dataset (Dataset): object containing validation data.
batch_size (int): batch size.
num_workers (int): number of parallel workers.
Returns:
Tuple[DataLoader, DataLoader]: training dataloader and validation dataloader.
"""
train_loader = DataLoader(
train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
pin_memory=True,
drop_last=True,
)
val_loader = DataLoader(
val_dataset,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=True,
drop_last=False,
)
return train_loader, val_loader
def prepare_data(
dataset: str,
transform: Optional[Callable] = None,
data_dir: Optional[Union[str, Path]] = None,
train_dir: Optional[Union[str, Path]] = None,
val_dir: Optional[Union[str, Path]] = None,
batch_size: int = 64,
num_workers: int = 4,
) -> Tuple[DataLoader, DataLoader]:
"""Prepares transformations, creates dataset objects and wraps them in dataloaders.
Args:
        dataset (str): dataset name.
        transform (Optional[Callable], optional): transformation pipeline applied
            to both the training and validation data; if None, the dataset-specific
            defaults from prepare_transforms are used. Defaults to None.
data_dir (Optional[Union[str, Path]], optional): path where to download/locate the dataset.
Defaults to None.
train_dir (Optional[Union[str, Path]], optional): subpath where the
training data is located. Defaults to None.
val_dir (Optional[Union[str, Path]], optional): subpath where the
validation data is located. Defaults to None.
batch_size (int, optional): batch size. Defaults to 64.
num_workers (int, optional): number of parallel workers. Defaults to 4.
Returns:
        Tuple[DataLoader, DataLoader]: prepared training and validation dataloaders.
"""
if transform is None:
T_train, T_val = prepare_transforms(dataset)
else:
T_train = transform
T_val = transform
train_dataset, val_dataset = prepare_datasets(
dataset,
T_train,
T_val,
data_dir=data_dir,
train_dir=train_dir,
val_dir=val_dir,
)
train_loader, val_loader = prepare_dataloaders(
train_dataset,
val_dataset,
batch_size=batch_size,
num_workers=num_workers,
)
return train_loader, val_loader
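# End-to-end sketch (the data_dir below is hypothetical): one call downloads
# CIFAR-10 if needed, builds the default transforms, wraps both splits in
# dataloaders, and returns them ready for a training loop.
if __name__ == "__main__":
    train_loader, val_loader = prepare_data(
        "cifar10",
        data_dir="./datasets",  # hypothetical download location
        batch_size=128,
        num_workers=2,
    )
    print(len(train_loader), len(val_loader))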
|
prepare_dataloaders
|
Wraps a train and a validation dataset with a DataLoader.
Args:
train_dataset (Dataset): object containing training data.
val_dataset (Dataset): object containing validation data.
batch_size (int): batch size.
num_workers (int): number of parallel workers.
Returns:
Tuple[DataLoader, DataLoader]: training dataloader and validation dataloader.
|
import os
from pathlib import Path
from typing import Callable, Optional, Tuple, Union
import torchvision
from torch import nn
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from torchvision.datasets import STL10, ImageFolder
def build_custom_pipeline():
"""Builds augmentation pipelines for custom data.
    If you want to do esoteric augmentations, you can just rewrite this function.
Needs to return a dict with the same structure.
"""
pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=224, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
"T_val": transforms.Compose(
[
transforms.Resize(256), # resize shorter
transforms.CenterCrop(224), # take center crop
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
}
return pipeline
def prepare_transforms(dataset: str) -> Tuple[nn.Module, nn.Module]:
"""Prepares pre-defined train and test transformation pipelines for some datasets.
Args:
dataset (str): dataset name.
Returns:
Tuple[nn.Module, nn.Module]: training and validation transformation pipelines.
"""
cifar_pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=32, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),
]
),
"T_val": transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),
]
),
}
stl_pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=96, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4823, 0.4466), (0.247, 0.243, 0.261)),
]
),
"T_val": transforms.Compose(
[
transforms.Resize((96, 96)),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4823, 0.4466), (0.247, 0.243, 0.261)),
]
),
}
imagenet_pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=224, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
"T_val": transforms.Compose(
[
transforms.Resize(256), # resize shorter
transforms.CenterCrop(224), # take center crop
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
}
custom_pipeline = build_custom_pipeline()
pipelines = {
"cifar10": cifar_pipeline,
"cifar100": cifar_pipeline,
"stl10": stl_pipeline,
"imagenet100": imagenet_pipeline,
"imagenet": imagenet_pipeline,
"custom": custom_pipeline,
}
assert dataset in pipelines
pipeline = pipelines[dataset]
T_train = pipeline["T_train"]
T_val = pipeline["T_val"]
return T_train, T_val
def prepare_datasets(
dataset: str,
T_train: Callable,
T_val: Callable,
data_dir: Optional[Union[str, Path]] = None,
train_dir: Optional[Union[str, Path]] = None,
val_dir: Optional[Union[str, Path]] = None,
) -> Tuple[Dataset, Dataset]:
"""Prepares train and val datasets.
Args:
dataset (str): dataset name.
T_train (Callable): pipeline of transformations for training dataset.
T_val (Callable): pipeline of transformations for validation dataset.
        data_dir (Optional[Union[str, Path]]): path where to download/locate the dataset.
        train_dir (Optional[Union[str, Path]]): subpath where the training data is located.
        val_dir (Optional[Union[str, Path]]): subpath where the validation data is located.
Returns:
Tuple[Dataset, Dataset]: training dataset and validation dataset.
"""
if data_dir is None:
sandbox_dir = Path(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
data_dir = sandbox_dir / "datasets"
else:
data_dir = Path(data_dir)
if train_dir is None:
train_dir = Path(f"{dataset}/train")
else:
train_dir = Path(train_dir)
if val_dir is None:
val_dir = Path(f"{dataset}/val")
else:
val_dir = Path(val_dir)
assert dataset in ["cifar10", "cifar100", "stl10", "imagenet", "imagenet100", "custom"]
if dataset in ["cifar10", "cifar100"]:
DatasetClass = vars(torchvision.datasets)[dataset.upper()]
train_dataset = DatasetClass(
data_dir / train_dir,
train=True,
download=True,
transform=T_train,
)
val_dataset = DatasetClass(
data_dir / val_dir,
train=False,
download=True,
transform=T_val,
)
elif dataset == "stl10":
train_dataset = STL10(
data_dir / train_dir,
split="train",
download=True,
transform=T_train,
)
val_dataset = STL10(
data_dir / val_dir,
split="test",
download=True,
transform=T_val,
)
elif dataset in ["imagenet", "imagenet100", "custom"]:
train_dir = data_dir / train_dir
val_dir = data_dir / val_dir
train_dataset = ImageFolder(train_dir, T_train)
val_dataset = ImageFolder(val_dir, T_val)
return train_dataset, val_dataset
# MASKED: prepare_dataloaders function (lines 203-232)
def prepare_data(
dataset: str,
transform: Optional[Callable] = None,
data_dir: Optional[Union[str, Path]] = None,
train_dir: Optional[Union[str, Path]] = None,
val_dir: Optional[Union[str, Path]] = None,
batch_size: int = 64,
num_workers: int = 4,
) -> Tuple[DataLoader, DataLoader]:
"""Prepares transformations, creates dataset objects and wraps them in dataloaders.
Args:
        dataset (str): dataset name.
        transform (Optional[Callable], optional): transformation pipeline applied
            to both the training and validation data; if None, the dataset-specific
            defaults from prepare_transforms are used. Defaults to None.
data_dir (Optional[Union[str, Path]], optional): path where to download/locate the dataset.
Defaults to None.
train_dir (Optional[Union[str, Path]], optional): subpath where the
training data is located. Defaults to None.
val_dir (Optional[Union[str, Path]], optional): subpath where the
validation data is located. Defaults to None.
batch_size (int, optional): batch size. Defaults to 64.
num_workers (int, optional): number of parallel workers. Defaults to 4.
Returns:
        Tuple[DataLoader, DataLoader]: prepared training and validation dataloaders.
"""
if transform is None:
T_train, T_val = prepare_transforms(dataset)
else:
T_train = transform
T_val = transform
train_dataset, val_dataset = prepare_datasets(
dataset,
T_train,
T_val,
data_dir=data_dir,
train_dir=train_dir,
val_dir=val_dir,
)
train_loader, val_loader = prepare_dataloaders(
train_dataset,
val_dataset,
batch_size=batch_size,
num_workers=num_workers,
)
return train_loader, val_loader
|
def prepare_dataloaders(
train_dataset: Dataset, val_dataset: Dataset, batch_size: int = 64, num_workers: int = 4
) -> Tuple[DataLoader, DataLoader]:
"""Wraps a train and a validation dataset with a DataLoader.
Args:
train_dataset (Dataset): object containing training data.
val_dataset (Dataset): object containing validation data.
batch_size (int): batch size.
num_workers (int): number of parallel workers.
Returns:
Tuple[DataLoader, DataLoader]: training dataloader and validation dataloader.
"""
train_loader = DataLoader(
train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
pin_memory=True,
drop_last=True,
)
val_loader = DataLoader(
val_dataset,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=True,
drop_last=False,
)
return train_loader, val_loader
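# Usage sketch with synthetic data (shapes are arbitrary examples): any pair
# of torch Datasets can be wrapped, not only the ones from prepare_datasets.
if __name__ == "__main__":
    import torch
    from torch.utils.data import TensorDataset
    train_ds = TensorDataset(torch.randn(256, 3, 32, 32),
                             torch.zeros(256, dtype=torch.long))
    val_ds = TensorDataset(torch.randn(64, 3, 32, 32),
                           torch.zeros(64, dtype=torch.long))
    train_loader, val_loader = prepare_dataloaders(train_ds, val_ds, batch_size=32)
    xb, yb = next(iter(train_loader))
    print(xb.shape, yb.shape)  # torch.Size([32, 3, 32, 32]) torch.Size([32])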
| 203
| 232
|
import os
from pathlib import Path
from typing import Callable, Optional, Tuple, Union
import torchvision
from torch import nn
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from torchvision.datasets import STL10, ImageFolder
def build_custom_pipeline():
"""Builds augmentation pipelines for custom data.
    If you want to do esoteric augmentations, you can just rewrite this function.
Needs to return a dict with the same structure.
"""
pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=224, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
"T_val": transforms.Compose(
[
transforms.Resize(256), # resize shorter
transforms.CenterCrop(224), # take center crop
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
}
return pipeline
def prepare_transforms(dataset: str) -> Tuple[nn.Module, nn.Module]:
"""Prepares pre-defined train and test transformation pipelines for some datasets.
Args:
dataset (str): dataset name.
Returns:
Tuple[nn.Module, nn.Module]: training and validation transformation pipelines.
"""
cifar_pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=32, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),
]
),
"T_val": transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),
]
),
}
stl_pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=96, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4823, 0.4466), (0.247, 0.243, 0.261)),
]
),
"T_val": transforms.Compose(
[
transforms.Resize((96, 96)),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4823, 0.4466), (0.247, 0.243, 0.261)),
]
),
}
imagenet_pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=224, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
"T_val": transforms.Compose(
[
transforms.Resize(256), # resize shorter
transforms.CenterCrop(224), # take center crop
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
}
custom_pipeline = build_custom_pipeline()
pipelines = {
"cifar10": cifar_pipeline,
"cifar100": cifar_pipeline,
"stl10": stl_pipeline,
"imagenet100": imagenet_pipeline,
"imagenet": imagenet_pipeline,
"custom": custom_pipeline,
}
assert dataset in pipelines
pipeline = pipelines[dataset]
T_train = pipeline["T_train"]
T_val = pipeline["T_val"]
return T_train, T_val
def prepare_datasets(
dataset: str,
T_train: Callable,
T_val: Callable,
data_dir: Optional[Union[str, Path]] = None,
train_dir: Optional[Union[str, Path]] = None,
val_dir: Optional[Union[str, Path]] = None,
) -> Tuple[Dataset, Dataset]:
"""Prepares train and val datasets.
Args:
dataset (str): dataset name.
T_train (Callable): pipeline of transformations for training dataset.
T_val (Callable): pipeline of transformations for validation dataset.
        data_dir (Optional[Union[str, Path]]): path where to download/locate the dataset.
        train_dir (Optional[Union[str, Path]]): subpath where the training data is located.
        val_dir (Optional[Union[str, Path]]): subpath where the validation data is located.
Returns:
Tuple[Dataset, Dataset]: training dataset and validation dataset.
"""
if data_dir is None:
sandbox_dir = Path(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
data_dir = sandbox_dir / "datasets"
else:
data_dir = Path(data_dir)
if train_dir is None:
train_dir = Path(f"{dataset}/train")
else:
train_dir = Path(train_dir)
if val_dir is None:
val_dir = Path(f"{dataset}/val")
else:
val_dir = Path(val_dir)
assert dataset in ["cifar10", "cifar100", "stl10", "imagenet", "imagenet100", "custom"]
if dataset in ["cifar10", "cifar100"]:
DatasetClass = vars(torchvision.datasets)[dataset.upper()]
train_dataset = DatasetClass(
data_dir / train_dir,
train=True,
download=True,
transform=T_train,
)
val_dataset = DatasetClass(
data_dir / val_dir,
train=False,
download=True,
transform=T_val,
)
elif dataset == "stl10":
train_dataset = STL10(
data_dir / train_dir,
split="train",
download=True,
transform=T_train,
)
val_dataset = STL10(
data_dir / val_dir,
split="test",
download=True,
transform=T_val,
)
elif dataset in ["imagenet", "imagenet100", "custom"]:
train_dir = data_dir / train_dir
val_dir = data_dir / val_dir
train_dataset = ImageFolder(train_dir, T_train)
val_dataset = ImageFolder(val_dir, T_val)
return train_dataset, val_dataset
def prepare_dataloaders(
train_dataset: Dataset, val_dataset: Dataset, batch_size: int = 64, num_workers: int = 4
) -> Tuple[DataLoader, DataLoader]:
"""Wraps a train and a validation dataset with a DataLoader.
Args:
train_dataset (Dataset): object containing training data.
val_dataset (Dataset): object containing validation data.
batch_size (int): batch size.
num_workers (int): number of parallel workers.
Returns:
Tuple[DataLoader, DataLoader]: training dataloader and validation dataloader.
"""
train_loader = DataLoader(
train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
pin_memory=True,
drop_last=True,
)
val_loader = DataLoader(
val_dataset,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=True,
drop_last=False,
)
return train_loader, val_loader
def prepare_data(
dataset: str,
transform: Optional[Callable] = None,
data_dir: Optional[Union[str, Path]] = None,
train_dir: Optional[Union[str, Path]] = None,
val_dir: Optional[Union[str, Path]] = None,
batch_size: int = 64,
num_workers: int = 4,
) -> Tuple[DataLoader, DataLoader]:
"""Prepares transformations, creates dataset objects and wraps them in dataloaders.
Args:
        dataset (str): dataset name.
        transform (Optional[Callable], optional): transformation pipeline applied
            to both the training and validation data; if None, the dataset-specific
            defaults from prepare_transforms are used. Defaults to None.
data_dir (Optional[Union[str, Path]], optional): path where to download/locate the dataset.
Defaults to None.
train_dir (Optional[Union[str, Path]], optional): subpath where the
training data is located. Defaults to None.
val_dir (Optional[Union[str, Path]], optional): subpath where the
validation data is located. Defaults to None.
batch_size (int, optional): batch size. Defaults to 64.
num_workers (int, optional): number of parallel workers. Defaults to 4.
Returns:
        Tuple[DataLoader, DataLoader]: prepared training and validation dataloaders.
"""
if transform is None:
T_train, T_val = prepare_transforms(dataset)
else:
T_train = transform
T_val = transform
train_dataset, val_dataset = prepare_datasets(
dataset,
T_train,
T_val,
data_dir=data_dir,
train_dir=train_dir,
val_dir=val_dir,
)
train_loader, val_loader = prepare_dataloaders(
train_dataset,
val_dataset,
batch_size=batch_size,
num_workers=num_workers,
)
return train_loader, val_loader
|
build_fsm_spec_random
|
Build an FSM spec with the specified number of states.
The FSM spec uses only a single input and a single output. As a side
product, a list of output patterns is also returned.
Parameters
----------
num_states : int
The number of states of the FSM.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
|
# Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3.  Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from random import randint
from math import ceil
import numpy as np
import pytest
from pynq import Overlay
from pynq.tests.util import user_answer_yes
from pynq.lib.logictools.waveform import bitstring_to_int
from pynq.lib.logictools.waveform import wave_to_bitstring
from pynq.lib.logictools import FSMGenerator
from pynq.lib.logictools import ARDUINO
from pynq.lib.logictools import PYNQZ1_LOGICTOOLS_SPECIFICATION
from pynq.lib.logictools import MAX_NUM_TRACE_SAMPLES
from pynq.lib.logictools import FSM_MIN_NUM_STATES
from pynq.lib.logictools import FSM_MAX_NUM_STATES
from pynq.lib.logictools import FSM_MAX_INPUT_BITS
from pynq.lib.logictools import FSM_MAX_STATE_INPUT_BITS
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "pynq_support@xilinx.com"
try:
ol = Overlay('logictools.bit', download=False)
flag0 = True
except IOError:
flag0 = False
flag1 = user_answer_yes("\nTest Finite State Machine (FSM) generator?")
if flag1:
mb_info = ARDUINO
flag = flag0 and flag1
pin_dict = PYNQZ1_LOGICTOOLS_SPECIFICATION['traceable_outputs']
interface_width = PYNQZ1_LOGICTOOLS_SPECIFICATION['interface_width']
def build_fsm_spec_4_state(direction_logic_value):
"""Build an FSM spec with 4 states.
The FSM built has 2 inputs, 1 output, and 4 states. It acts like a
2-bit counter, where the output goes to high only if the FSM is in the
final state.
When the direction pin is low, the counter counts up; if it is high, the
counter counts down.
Parameters
----------
direction_logic_value : int
The logic value of the direction pin.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output pattern corresponding to the direction value.
list
The state bit0 pattern corresponding to the direction value.
list
The state bit1 pattern corresponding to the direction value.
"""
out, rst, direction = list(pin_dict.keys())[0:3]
fsm_spec_4_state = {'inputs': [('rst', rst), ('direction', direction)],
'outputs': [('test', out)],
'states': ['S0', 'S1', 'S2', 'S3'],
'transitions': [['00', 'S0', 'S1', '0'],
['01', 'S0', 'S3', '0'],
['00', 'S1', 'S2', '0'],
['01', 'S1', 'S0', '0'],
['00', 'S2', 'S3', '0'],
['01', 'S2', 'S1', '0'],
['00', 'S3', 'S0', '1'],
['01', 'S3', 'S2', '1'],
['1-', '*', 'S0', '']]}
if not direction_logic_value:
output_pattern = [0, 0, 0, 1]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 0, 1, 1]
else:
output_pattern = [0, 1, 0, 0]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 1, 1, 0]
return fsm_spec_4_state, \
output_pattern, state_bit0_pattern, state_bit1_pattern
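# Hypothetical helper (not part of the test suite): replay a spec's
# 'transitions' table in pure Python for a constant input, to sanity-check
# the expected output pattern without any hardware. A '-' in an input
# pattern is a wildcard, and a '*' source matches any current state.
def simulate_fsm(fsm_spec, input_bits, num_steps, start_state='S0'):
    state, outputs = start_state, []
    for _ in range(num_steps):
        for in_pattern, src, dst, out in fsm_spec['transitions']:
            input_match = all(p in ('-', b)
                              for p, b in zip(in_pattern, input_bits))
            if input_match and src in (state, '*'):
                outputs.append(out)
                state = dst
                break
    return outputs
# e.g. with both rst and direction tied low ('00'), four steps reproduce the
# counting-up pattern:
# [int(o) for o in simulate_fsm(build_fsm_spec_4_state(0)[0], '00', 4)]
# == [0, 0, 0, 1]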
# MASKED: build_fsm_spec_random function (lines 120-159)
def build_fsm_spec_max_in_out():
"""Build an FSM spec using a maximum number of inputs and outputs.
The returned FSM spec has a maximum number of inputs and
outputs. At the same time, the largest available number of
states will be implemented. For example, on PYNQ-Z1, if
FSM_MAX_INPUT_BITS = 8, and FSM_MAX_STATE_INPUT_BITS = 13, we will
implement 2**(13-8)-1 = 31 states. This is the largest number of states
available for this setup, since there is always 1 dummy state that has
to be reserved.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
output_pins = list(pin_dict.keys())[FSM_MAX_INPUT_BITS:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': [['1' * len(input_pins), '*', 'S0', '']]}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
for i in range(len(input_pins)):
fsm_spec_inout['inputs'].append(('input{}'.format(i),
input_pins[i]))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['0' * len(input_pins), current_state, next_state,
output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
def build_fsm_spec_free_run():
"""Build a spec that results in a free-running FSM.
This will return an FSM spec with no given inputs.
In this case, the FSM is a free running state machine.
A maximum number of states are deployed.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin = list(pin_dict.keys())[0]
output_pins = list(pin_dict.keys())[1:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': []}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = FSM_MAX_NUM_STATES
fsm_spec_inout['inputs'].append(('input0', input_pin))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['-', current_state, next_state, output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_samples():
"""Test for the Finite State Machine Generator class.
In this test, the pattern generated by the FSM will be compared with the
one specified. We will test a minimum number of (FSM period + 1) samples,
and a maximum number of samples. 10MHz and 100MHz clocks are tested
for each case.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, _, _ = build_fsm_spec_4_state(1)
fsm_period = len(fsm_spec_4_state['states'])
for num_samples in [fsm_period, MAX_NUM_TRACE_SAMPLES]:
test_tile = np.array(output_pattern)
golden_test_array = np.tile(test_tile, ceil(num_samples / 4))
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
assert fsm_generator.status == 'RESET'
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=num_samples)
fsm_generator.setup(fsm_spec_4_state,
frequency_mhz=fsm_frequency_mhz)
assert fsm_generator.status == 'READY'
assert 'bram_data_buf' not in \
fsm_generator.logictools_controller.buffers, \
'bram_data_buf is not freed after use.'
fsm_generator.run()
assert fsm_generator.status == 'RUNNING'
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
assert np.array_equal(test_array,
golden_test_array[:num_samples]), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
assert fsm_generator.status == 'READY'
fsm_generator.reset()
assert fsm_generator.status == 'RESET'
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_state_bits():
"""Test for the Finite State Machine Generator class.
This test is similar to the first test, but in this test,
we will test the case when the state bits are also used as outputs.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, \
state_bit0_pattern, state_bit1_pattern = build_fsm_spec_4_state(0)
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern)
golden_state_bit0_array = np.array(state_bit0_pattern)
golden_state_bit1_array = np.array(state_bit1_pattern)
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
fsm_generator.run()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_step():
"""Test for the Finite State Machine Generator class.
This test is similar to the above test, but in this test,
we will test the `step()` method, and ask users to change the input
logic values in the middle of the test.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("")
fsm_spec_4_state, output_pattern_up, \
state_bit0_pattern_up, \
state_bit1_pattern_up = build_fsm_spec_4_state(0)
_, output_pattern_down, \
state_bit0_pattern_down, \
state_bit1_pattern_down = build_fsm_spec_4_state(1)
output_pattern_down.append(output_pattern_down.pop(0))
state_bit0_pattern_down.append(state_bit0_pattern_down.pop(0))
state_bit1_pattern_down.append(state_bit1_pattern_down.pop(0))
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern_up +
output_pattern_down[1:])
golden_state_bit0_array = np.array(state_bit0_pattern_up +
state_bit0_pattern_down[1:])
golden_state_bit1_array = np.array(state_bit1_pattern_up +
state_bit1_pattern_down[1:])
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
print("Connect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_up)-1):
fsm_generator.step()
print("Connect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_down)):
fsm_generator.step()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_no_trace():
"""Test for the Finite State Machine Generator class.
This is similar to the first test, but in this test,
we will test the case when no analyzer is specified.
"""
ol.download()
fsm_spec_4_state, _, _, _ = build_fsm_spec_4_state(0)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=False)
fsm_generator.setup(fsm_spec_4_state)
fsm_generator.run()
exception_raised = False
try:
fsm_generator.show_waveform()
except ValueError:
exception_raised = True
assert exception_raised, 'Should raise exception for show_waveform().'
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states1():
"""Test for the Finite State Machine Generator class.
    This test will check (FSM_MIN_NUM_STATES - 1) and (FSM_MAX_NUM_STATES + 1) states.
These cases should raise exceptions. For these tests, we use the minimum
number of input and output pins.
"""
ol.download()
fsm_generator = None
exception_raised = False
fsm_spec_less_than_min_state, _ = build_fsm_spec_random(
FSM_MIN_NUM_STATES - 1)
fsm_spec_more_than_max_state, _ = build_fsm_spec_random(
FSM_MAX_NUM_STATES + 1)
for fsm_spec in [fsm_spec_less_than_min_state,
fsm_spec_more_than_max_state]:
num_states = len(fsm_spec['states'])
try:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec)
except ValueError:
exception_raised = True
assert exception_raised, \
'Should raise exception when ' \
'there are {} states in the FSM.'.format(num_states)
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states2():
"""Test for the Finite State Machine Generator class.
    This test will check 2 and FSM_MAX_NUM_STATES states.
These cases should be able to pass random tests.
For these tests, we use the minimum number of input and output pins.
"""
ol.download()
input_pin = list(pin_dict.keys())[0]
print("\nConnect {} to GND, and disconnect other pins.".format(input_pin))
input("Hit enter after done ...")
for num_states in [2, FSM_MAX_NUM_STATES]:
fsm_spec, test_pattern = build_fsm_spec_random(num_states)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec, frequency_mhz=100)
fsm_generator.run()
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
period = num_states
test_tile = np.array(test_pattern)
golden_test_array = np.tile(test_tile,
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_array,
golden_test_array[:MAX_NUM_TRACE_SAMPLES]), \
'Analysis not matching the generated pattern.'
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_max_in_out():
"""Test for the Finite State Machine Generator class.
This test will test when maximum number of inputs and
outputs are used. At the same time, the largest available number of
states will be implemented.
"""
ol.download()
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
print("\nConnect {} to GND.".format(input_pins))
print("Disconnect all other pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_max_in_out()
period = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
num_output_pins = interface_width - FSM_MAX_INPUT_BITS
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = [[] for _ in range(num_output_pins)]
for i in range(num_output_pins):
golden_arrays[i] = np.tile(test_patterns[i],
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_arrays[i],
golden_arrays[i][:MAX_NUM_TRACE_SAMPLES]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_free_run():
"""Test for the Finite State Machine Generator class.
This will examine a special scenario where no inputs are given.
In this case, the FSM is a free running state machine. Since the FSM
specification requires at least 1 input pin to be specified, 1 pin can
be used as `don't care` input, while all the other pins are used as
outputs. A maximum number of states are deployed.
"""
ol.download()
print("\nDisconnect all the pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_free_run()
period = FSM_MAX_NUM_STATES
num_output_pins = interface_width - 1
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=period)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = test_patterns
for i in range(num_output_pins):
assert np.array_equal(test_arrays[i], golden_arrays[i]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
|
def build_fsm_spec_random(num_states):
"""Build an FSM spec with the specified number of states.
    The FSM spec uses only a single input and a single output. As a side
    product, a list of output patterns is also returned.
Parameters
----------
num_states : int
The number of states of the FSM.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin, output_pin = list(pin_dict.keys())[0:2]
if num_states == 1:
return {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': ['S0'],
'transitions': [['1', '*', 'S0', '']]}, None
else:
fsm_spec_state = {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': [],
'transitions': [['1', '*', 'S0', '']]}
output_pattern_list = list()
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i+1) % num_states)
fsm_spec_state['states'] += [current_state]
output_pattern = '{}'.format(randint(0, 1))
transition = ['0', current_state, next_state, output_pattern]
fsm_spec_state['transitions'] += [transition]
output_pattern_list.append(int(output_pattern))
return fsm_spec_state, output_pattern_list
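# Shape of the returned spec, written out by hand for num_states=3 with
# hypothetical pins 'D0' (input) and 'D1' (output); the real pin names come
# from pin_dict, and the per-state output bits are drawn randomly at build
# time, so the '1'/'0' values below are just one possible outcome.
if __name__ == "__main__":
    example_spec = {'inputs': [('rst', 'D0')],
                    'outputs': [('test', 'D1')],
                    'states': ['S0', 'S1', 'S2'],
                    'transitions': [['1', '*', 'S0', ''],
                                    ['0', 'S0', 'S1', '1'],
                                    ['0', 'S1', 'S2', '0'],
                                    ['0', 'S2', 'S0', '1']]}
    example_pattern = [1, 0, 1]  # ints parsed from the transition outputs
    print(example_spec['states'], example_pattern)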
| 120
| 159
|
# Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3.  Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from random import randint
from math import ceil
import numpy as np
import pytest
from pynq import Overlay
from pynq.tests.util import user_answer_yes
from pynq.lib.logictools.waveform import bitstring_to_int
from pynq.lib.logictools.waveform import wave_to_bitstring
from pynq.lib.logictools import FSMGenerator
from pynq.lib.logictools import ARDUINO
from pynq.lib.logictools import PYNQZ1_LOGICTOOLS_SPECIFICATION
from pynq.lib.logictools import MAX_NUM_TRACE_SAMPLES
from pynq.lib.logictools import FSM_MIN_NUM_STATES
from pynq.lib.logictools import FSM_MAX_NUM_STATES
from pynq.lib.logictools import FSM_MAX_INPUT_BITS
from pynq.lib.logictools import FSM_MAX_STATE_INPUT_BITS
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "pynq_support@xilinx.com"
try:
ol = Overlay('logictools.bit', download=False)
flag0 = True
except IOError:
flag0 = False
flag1 = user_answer_yes("\nTest Finite State Machine (FSM) generator?")
if flag1:
mb_info = ARDUINO
flag = flag0 and flag1
pin_dict = PYNQZ1_LOGICTOOLS_SPECIFICATION['traceable_outputs']
interface_width = PYNQZ1_LOGICTOOLS_SPECIFICATION['interface_width']
def build_fsm_spec_4_state(direction_logic_value):
"""Build an FSM spec with 4 states.
The FSM built has 2 inputs, 1 output, and 4 states. It acts like a
2-bit counter, where the output goes to high only if the FSM is in the
final state.
When the direction pin is low, the counter counts up; if it is high, the
counter counts down.
Parameters
----------
direction_logic_value : int
The logic value of the direction pin.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output pattern corresponding to the direction value.
list
The state bit0 pattern corresponding to the direction value.
list
The state bit1 pattern corresponding to the direction value.
"""
out, rst, direction = list(pin_dict.keys())[0:3]
fsm_spec_4_state = {'inputs': [('rst', rst), ('direction', direction)],
'outputs': [('test', out)],
'states': ['S0', 'S1', 'S2', 'S3'],
'transitions': [['00', 'S0', 'S1', '0'],
['01', 'S0', 'S3', '0'],
['00', 'S1', 'S2', '0'],
['01', 'S1', 'S0', '0'],
['00', 'S2', 'S3', '0'],
['01', 'S2', 'S1', '0'],
['00', 'S3', 'S0', '1'],
['01', 'S3', 'S2', '1'],
['1-', '*', 'S0', '']]}
if not direction_logic_value:
output_pattern = [0, 0, 0, 1]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 0, 1, 1]
else:
output_pattern = [0, 1, 0, 0]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 1, 1, 0]
return fsm_spec_4_state, \
output_pattern, state_bit0_pattern, state_bit1_pattern
def build_fsm_spec_random(num_states):
"""Build an FSM spec with the specified number of states.
    The FSM spec uses only a single input and a single output. As a side
    product, a list of output patterns is also returned.
Parameters
----------
num_states : int
The number of states of the FSM.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin, output_pin = list(pin_dict.keys())[0:2]
if num_states == 1:
return {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': ['S0'],
'transitions': [['1', '*', 'S0', '']]}, None
else:
fsm_spec_state = {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': [],
'transitions': [['1', '*', 'S0', '']]}
output_pattern_list = list()
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i+1) % num_states)
fsm_spec_state['states'] += [current_state]
output_pattern = '{}'.format(randint(0, 1))
transition = ['0', current_state, next_state, output_pattern]
fsm_spec_state['transitions'] += [transition]
output_pattern_list.append(int(output_pattern))
return fsm_spec_state, output_pattern_list
def build_fsm_spec_max_in_out():
"""Build an FSM spec using a maximum number of inputs and outputs.
The returned FSM spec has a maximum number of inputs and
outputs. At the same time, the largest available number of
states will be implemented. For example, on PYNQ-Z1, if
FSM_MAX_INPUT_BITS = 8, and FSM_MAX_STATE_INPUT_BITS = 13, we will
implement 2**(13-8)-1 = 31 states. This is the largest number of states
available for this setup, since there is always 1 dummy state that has
to be reserved.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
output_pins = list(pin_dict.keys())[FSM_MAX_INPUT_BITS:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': [['1' * len(input_pins), '*', 'S0', '']]}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
for i in range(len(input_pins)):
fsm_spec_inout['inputs'].append(('input{}'.format(i),
input_pins[i]))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['0' * len(input_pins), current_state, next_state,
output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
def build_fsm_spec_free_run():
"""Build a spec that results in a free-running FSM.
This will return an FSM spec with no given inputs.
In this case, the FSM is a free running state machine.
A maximum number of states are deployed.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin = list(pin_dict.keys())[0]
output_pins = list(pin_dict.keys())[1:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': []}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = FSM_MAX_NUM_STATES
fsm_spec_inout['inputs'].append(('input0', input_pin))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['-', current_state, next_state, output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_samples():
"""Test for the Finite State Machine Generator class.
In this test, the pattern generated by the FSM will be compared with the
one specified. We will test a minimum number of (FSM period + 1) samples,
and a maximum number of samples. 10MHz and 100MHz clocks are tested
for each case.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, _, _ = build_fsm_spec_4_state(1)
fsm_period = len(fsm_spec_4_state['states'])
for num_samples in [fsm_period, MAX_NUM_TRACE_SAMPLES]:
test_tile = np.array(output_pattern)
golden_test_array = np.tile(test_tile, ceil(num_samples / 4))
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
assert fsm_generator.status == 'RESET'
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=num_samples)
fsm_generator.setup(fsm_spec_4_state,
frequency_mhz=fsm_frequency_mhz)
assert fsm_generator.status == 'READY'
assert 'bram_data_buf' not in \
fsm_generator.logictools_controller.buffers, \
'bram_data_buf is not freed after use.'
fsm_generator.run()
assert fsm_generator.status == 'RUNNING'
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
assert np.array_equal(test_array,
golden_test_array[:num_samples]), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
assert fsm_generator.status == 'READY'
fsm_generator.reset()
assert fsm_generator.status == 'RESET'
del fsm_generator
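# How the golden array in the test above is built: the 4-state FSM
# repeats its output pattern, so np.tile(pattern, ceil(num_samples / 4))
# yields at least num_samples entries and the [:num_samples] slice trims
# the overshoot. With the hypothetical values pattern = [0, 0, 0, 1] and
# num_samples = 10, the golden array is [0, 0, 0, 1, 0, 0, 0, 1, 0, 0].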
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_state_bits():
"""Test for the Finite State Machine Generator class.
    This test is similar to the first test, but here we test the case
    where the state bits are also used as outputs.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, \
state_bit0_pattern, state_bit1_pattern = build_fsm_spec_4_state(0)
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern)
golden_state_bit0_array = np.array(state_bit0_pattern)
golden_state_bit1_array = np.array(state_bit1_pattern)
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
fsm_generator.run()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_step():
"""Test for the Finite State Machine Generator class.
    This test is similar to the test above, but here we test the
    `step()` method, asking users to change the input logic values in
    the middle of the test.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("")
fsm_spec_4_state, output_pattern_up, \
state_bit0_pattern_up, \
state_bit1_pattern_up = build_fsm_spec_4_state(0)
_, output_pattern_down, \
state_bit0_pattern_down, \
state_bit1_pattern_down = build_fsm_spec_4_state(1)
output_pattern_down.append(output_pattern_down.pop(0))
state_bit0_pattern_down.append(state_bit0_pattern_down.pop(0))
state_bit1_pattern_down.append(state_bit1_pattern_down.pop(0))
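    # pop(0) followed by append rotates each "down" pattern left by one
    # sample, so the stepped "down" sequence lines up with the state in
    # which the "up" sequence stopped.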
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern_up +
output_pattern_down[1:])
golden_state_bit0_array = np.array(state_bit0_pattern_up +
state_bit0_pattern_down[1:])
golden_state_bit1_array = np.array(state_bit1_pattern_up +
state_bit1_pattern_down[1:])
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
print("Connect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_up)-1):
fsm_generator.step()
print("Connect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_down)):
fsm_generator.step()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_no_trace():
"""Test for the Finite State Machine Generator class.
    This is similar to the first test, but here we test the case
    where no analyzer is specified.
"""
ol.download()
fsm_spec_4_state, _, _, _ = build_fsm_spec_4_state(0)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=False)
fsm_generator.setup(fsm_spec_4_state)
fsm_generator.run()
exception_raised = False
try:
fsm_generator.show_waveform()
except ValueError:
exception_raised = True
assert exception_raised, 'Should raise exception for show_waveform().'
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states1():
"""Test for the Finite State Machine Generator class.
    This test will check 1 and (FSM_MAX_NUM_STATES + 1) states.
These cases should raise exceptions. For these tests, we use the minimum
number of input and output pins.
"""
ol.download()
fsm_generator = None
exception_raised = False
fsm_spec_less_than_min_state, _ = build_fsm_spec_random(
FSM_MIN_NUM_STATES - 1)
fsm_spec_more_than_max_state, _ = build_fsm_spec_random(
FSM_MAX_NUM_STATES + 1)
for fsm_spec in [fsm_spec_less_than_min_state,
fsm_spec_more_than_max_state]:
num_states = len(fsm_spec['states'])
        exception_raised = False
        try:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec)
except ValueError:
exception_raised = True
assert exception_raised, \
'Should raise exception when ' \
'there are {} states in the FSM.'.format(num_states)
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states2():
"""Test for the Finite State Machine Generator class.
    This test will check 2 and FSM_MAX_NUM_STATES states.
These cases should be able to pass random tests.
For these tests, we use the minimum number of input and output pins.
"""
ol.download()
input_pin = list(pin_dict.keys())[0]
print("\nConnect {} to GND, and disconnect other pins.".format(input_pin))
input("Hit enter after done ...")
for num_states in [2, FSM_MAX_NUM_STATES]:
fsm_spec, test_pattern = build_fsm_spec_random(num_states)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec, frequency_mhz=100)
fsm_generator.run()
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
period = num_states
test_tile = np.array(test_pattern)
golden_test_array = np.tile(test_tile,
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_array,
golden_test_array[:MAX_NUM_TRACE_SAMPLES]), \
'Analysis not matching the generated pattern.'
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_max_in_out():
"""Test for the Finite State Machine Generator class.
    This test covers the case when the maximum number of inputs and
    outputs is used. At the same time, the largest available number of
states will be implemented.
"""
ol.download()
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
print("\nConnect {} to GND.".format(input_pins))
print("Disconnect all other pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_max_in_out()
period = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
num_output_pins = interface_width - FSM_MAX_INPUT_BITS
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = [[] for _ in range(num_output_pins)]
for i in range(num_output_pins):
golden_arrays[i] = np.tile(test_patterns[i],
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_arrays[i],
golden_arrays[i][:MAX_NUM_TRACE_SAMPLES]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_free_run():
"""Test for the Finite State Machine Generator class.
This will examine a special scenario where no inputs are given.
In this case, the FSM is a free running state machine. Since the FSM
specification requires at least 1 input pin to be specified, 1 pin can
be used as `don't care` input, while all the other pins are used as
outputs. A maximum number of states are deployed.
"""
ol.download()
print("\nDisconnect all the pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_free_run()
period = FSM_MAX_NUM_STATES
num_output_pins = interface_width - 1
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=period)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = test_patterns
for i in range(num_output_pins):
assert np.array_equal(test_arrays[i], golden_arrays[i]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
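# All tests above convert analyzer wave strings into integer sample
# arrays via bitstring_to_int(wave_to_bitstring(...)). The stand-ins
# below are illustrative re-implementations only, not the pynq versions
# (those come from pynq.lib.logictools.waveform); they assume the usual
# WaveJSON-style encoding of 'l'/'h' samples with '.' repeating the
# previous sample.
def _wave_to_bitstring_sketch(wave):
    bits = []
    for ch in wave:
        bits.append(bits[-1] if ch == '.' else ('1' if ch == 'h' else '0'))
    return ''.join(bits)

def _bitstring_to_int_sketch(bitstring):
    return [int(b) for b in bitstring]

assert _bitstring_to_int_sketch(_wave_to_bitstring_sketch('lhh.l')) == \
    [0, 1, 1, 1, 0]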
|
build_fsm_spec_max_in_out
|
Build an FSM spec using a maximum number of inputs and outputs.
The returned FSM spec has a maximum number of inputs and
outputs. At the same time, the largest available number of
states will be implemented. For example, on PYNQ-Z1, if
FSM_MAX_INPUT_BITS = 8, and FSM_MAX_STATE_INPUT_BITS = 13, we will
implement 2**(13-8)-1 = 31 states. This is the largest number of states
available for this setup, since there is always 1 dummy state that has
to be reserved.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
|
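The state-count arithmetic in the docstring above can be checked with a
short sketch. This is a minimal illustration, assuming the PYNQ-Z1
example values quoted in the docstring; on hardware the two constants
are imported from pynq.lib.logictools.

# Minimal sketch of the state-count arithmetic, with assumed values.
FSM_MAX_INPUT_BITS = 8
FSM_MAX_STATE_INPUT_BITS = 13
# Bits left for state encoding once the input bits are taken out of the
# state-transition address; one encoding is reserved for the dummy state.
state_bits = FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS
num_states = 2 ** state_bits - 1
assert num_states == 31  # 2**(13-8) - 1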
# Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from random import randint
from math import ceil
import numpy as np
import pytest
from pynq import Overlay
from pynq.tests.util import user_answer_yes
from pynq.lib.logictools.waveform import bitstring_to_int
from pynq.lib.logictools.waveform import wave_to_bitstring
from pynq.lib.logictools import FSMGenerator
from pynq.lib.logictools import ARDUINO
from pynq.lib.logictools import PYNQZ1_LOGICTOOLS_SPECIFICATION
from pynq.lib.logictools import MAX_NUM_TRACE_SAMPLES
from pynq.lib.logictools import FSM_MIN_NUM_STATES
from pynq.lib.logictools import FSM_MAX_NUM_STATES
from pynq.lib.logictools import FSM_MAX_INPUT_BITS
from pynq.lib.logictools import FSM_MAX_STATE_INPUT_BITS
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "pynq_support@xilinx.com"
try:
ol = Overlay('logictools.bit', download=False)
flag0 = True
except IOError:
flag0 = False
flag1 = user_answer_yes("\nTest Finite State Machine (FSM) generator?")
if flag1:
mb_info = ARDUINO
flag = flag0 and flag1
pin_dict = PYNQZ1_LOGICTOOLS_SPECIFICATION['traceable_outputs']
interface_width = PYNQZ1_LOGICTOOLS_SPECIFICATION['interface_width']
def build_fsm_spec_4_state(direction_logic_value):
"""Build an FSM spec with 4 states.
The FSM built has 2 inputs, 1 output, and 4 states. It acts like a
2-bit counter, where the output goes to high only if the FSM is in the
final state.
When the direction pin is low, the counter counts up; if it is high, the
counter counts down.
Parameters
----------
direction_logic_value : int
The logic value of the direction pin.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output pattern corresponding to the direction value.
list
The state bit0 pattern corresponding to the direction value.
list
The state bit1 pattern corresponding to the direction value.
"""
out, rst, direction = list(pin_dict.keys())[0:3]
fsm_spec_4_state = {'inputs': [('rst', rst), ('direction', direction)],
'outputs': [('test', out)],
'states': ['S0', 'S1', 'S2', 'S3'],
'transitions': [['00', 'S0', 'S1', '0'],
['01', 'S0', 'S3', '0'],
['00', 'S1', 'S2', '0'],
['01', 'S1', 'S0', '0'],
['00', 'S2', 'S3', '0'],
['01', 'S2', 'S1', '0'],
['00', 'S3', 'S0', '1'],
['01', 'S3', 'S2', '1'],
['1-', '*', 'S0', '']]}
if not direction_logic_value:
output_pattern = [0, 0, 0, 1]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 0, 1, 1]
else:
output_pattern = [0, 1, 0, 0]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 1, 1, 0]
return fsm_spec_4_state, \
output_pattern, state_bit0_pattern, state_bit1_pattern
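# Reading a transition row above: [input_bits, current_state,
# next_state, output_bits], with input_bits ordered as the declared
# inputs (rst, direction). '00' rows count up, '01' rows count down,
# and '1-' (rst high, direction don't-care) forces any state ('*')
# back to S0.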
def build_fsm_spec_random(num_states):
"""Build an FSM spec with the specified number of states.
    The FSM spec uses only a single input and a single output. As a
    by-product, a list of output patterns is also returned.
Parameters
----------
num_states : int
The number of states of the FSM.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin, output_pin = list(pin_dict.keys())[0:2]
if num_states == 1:
return {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': ['S0'],
'transitions': [['1', '*', 'S0', '']]}, None
else:
fsm_spec_state = {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': [],
'transitions': [['1', '*', 'S0', '']]}
output_pattern_list = list()
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i+1) % num_states)
fsm_spec_state['states'] += [current_state]
output_pattern = '{}'.format(randint(0, 1))
transition = ['0', current_state, next_state, output_pattern]
fsm_spec_state['transitions'] += [transition]
output_pattern_list.append(int(output_pattern))
return fsm_spec_state, output_pattern_list
# MASKED: build_fsm_spec_max_in_out function (lines 162-213)
def build_fsm_spec_free_run():
"""Build a spec that results in a free-running FSM.
This will return an FSM spec with no given inputs.
In this case, the FSM is a free running state machine.
A maximum number of states are deployed.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin = list(pin_dict.keys())[0]
output_pins = list(pin_dict.keys())[1:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': []}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = FSM_MAX_NUM_STATES
fsm_spec_inout['inputs'].append(('input0', input_pin))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['-', current_state, next_state, output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_samples():
"""Test for the Finite State Machine Generator class.
In this test, the pattern generated by the FSM will be compared with the
one specified. We will test a minimum number of (FSM period + 1) samples,
and a maximum number of samples. 10MHz and 100MHz clocks are tested
for each case.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, _, _ = build_fsm_spec_4_state(1)
fsm_period = len(fsm_spec_4_state['states'])
for num_samples in [fsm_period, MAX_NUM_TRACE_SAMPLES]:
test_tile = np.array(output_pattern)
golden_test_array = np.tile(test_tile, ceil(num_samples / 4))
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
assert fsm_generator.status == 'RESET'
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=num_samples)
fsm_generator.setup(fsm_spec_4_state,
frequency_mhz=fsm_frequency_mhz)
assert fsm_generator.status == 'READY'
assert 'bram_data_buf' not in \
fsm_generator.logictools_controller.buffers, \
'bram_data_buf is not freed after use.'
fsm_generator.run()
assert fsm_generator.status == 'RUNNING'
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
assert np.array_equal(test_array,
golden_test_array[:num_samples]), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
assert fsm_generator.status == 'READY'
fsm_generator.reset()
assert fsm_generator.status == 'RESET'
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_state_bits():
"""Test for the Finite State Machine Generator class.
    This test is similar to the first test, but here we test the case
    where the state bits are also used as outputs.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, \
state_bit0_pattern, state_bit1_pattern = build_fsm_spec_4_state(0)
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern)
golden_state_bit0_array = np.array(state_bit0_pattern)
golden_state_bit1_array = np.array(state_bit1_pattern)
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
fsm_generator.run()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_step():
"""Test for the Finite State Machine Generator class.
    This test is similar to the test above, but here we test the
    `step()` method, asking users to change the input logic values in
    the middle of the test.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("")
fsm_spec_4_state, output_pattern_up, \
state_bit0_pattern_up, \
state_bit1_pattern_up = build_fsm_spec_4_state(0)
_, output_pattern_down, \
state_bit0_pattern_down, \
state_bit1_pattern_down = build_fsm_spec_4_state(1)
output_pattern_down.append(output_pattern_down.pop(0))
state_bit0_pattern_down.append(state_bit0_pattern_down.pop(0))
state_bit1_pattern_down.append(state_bit1_pattern_down.pop(0))
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern_up +
output_pattern_down[1:])
golden_state_bit0_array = np.array(state_bit0_pattern_up +
state_bit0_pattern_down[1:])
golden_state_bit1_array = np.array(state_bit1_pattern_up +
state_bit1_pattern_down[1:])
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
print("Connect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_up)-1):
fsm_generator.step()
print("Connect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_down)):
fsm_generator.step()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_no_trace():
"""Test for the Finite State Machine Generator class.
    This is similar to the first test, but here we test the case
    where no analyzer is specified.
"""
ol.download()
fsm_spec_4_state, _, _, _ = build_fsm_spec_4_state(0)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=False)
fsm_generator.setup(fsm_spec_4_state)
fsm_generator.run()
exception_raised = False
try:
fsm_generator.show_waveform()
except ValueError:
exception_raised = True
assert exception_raised, 'Should raise exception for show_waveform().'
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states1():
"""Test for the Finite State Machine Generator class.
    This test will check 1 and (FSM_MAX_NUM_STATES + 1) states.
These cases should raise exceptions. For these tests, we use the minimum
number of input and output pins.
"""
ol.download()
fsm_generator = None
exception_raised = False
fsm_spec_less_than_min_state, _ = build_fsm_spec_random(
FSM_MIN_NUM_STATES - 1)
fsm_spec_more_than_max_state, _ = build_fsm_spec_random(
FSM_MAX_NUM_STATES + 1)
for fsm_spec in [fsm_spec_less_than_min_state,
fsm_spec_more_than_max_state]:
num_states = len(fsm_spec['states'])
        exception_raised = False
        try:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec)
except ValueError:
exception_raised = True
assert exception_raised, \
'Should raise exception when ' \
'there are {} states in the FSM.'.format(num_states)
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states2():
"""Test for the Finite State Machine Generator class.
    This test will check 2 and FSM_MAX_NUM_STATES states.
These cases should be able to pass random tests.
For these tests, we use the minimum number of input and output pins.
"""
ol.download()
input_pin = list(pin_dict.keys())[0]
print("\nConnect {} to GND, and disconnect other pins.".format(input_pin))
input("Hit enter after done ...")
for num_states in [2, FSM_MAX_NUM_STATES]:
fsm_spec, test_pattern = build_fsm_spec_random(num_states)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec, frequency_mhz=100)
fsm_generator.run()
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
period = num_states
test_tile = np.array(test_pattern)
golden_test_array = np.tile(test_tile,
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_array,
golden_test_array[:MAX_NUM_TRACE_SAMPLES]), \
'Analysis not matching the generated pattern.'
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_max_in_out():
"""Test for the Finite State Machine Generator class.
    This test covers the case when the maximum number of inputs and
    outputs is used. At the same time, the largest available number of
states will be implemented.
"""
ol.download()
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
print("\nConnect {} to GND.".format(input_pins))
print("Disconnect all other pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_max_in_out()
period = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
num_output_pins = interface_width - FSM_MAX_INPUT_BITS
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = [[] for _ in range(num_output_pins)]
for i in range(num_output_pins):
golden_arrays[i] = np.tile(test_patterns[i],
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_arrays[i],
golden_arrays[i][:MAX_NUM_TRACE_SAMPLES]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_free_run():
"""Test for the Finite State Machine Generator class.
This will examine a special scenario where no inputs are given.
In this case, the FSM is a free running state machine. Since the FSM
specification requires at least 1 input pin to be specified, 1 pin can
be used as `don't care` input, while all the other pins are used as
outputs. A maximum number of states are deployed.
"""
ol.download()
print("\nDisconnect all the pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_free_run()
period = FSM_MAX_NUM_STATES
num_output_pins = interface_width - 1
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=period)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = test_patterns
for i in range(num_output_pins):
assert np.array_equal(test_arrays[i], golden_arrays[i]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
|
def build_fsm_spec_max_in_out():
"""Build an FSM spec using a maximum number of inputs and outputs.
The returned FSM spec has a maximum number of inputs and
outputs. At the same time, the largest available number of
states will be implemented. For example, on PYNQ-Z1, if
FSM_MAX_INPUT_BITS = 8, and FSM_MAX_STATE_INPUT_BITS = 13, we will
implement 2**(13-8)-1 = 31 states. This is the largest number of states
available for this setup, since there is always 1 dummy state that has
to be reserved.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
output_pins = list(pin_dict.keys())[FSM_MAX_INPUT_BITS:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': [['1' * len(input_pins), '*', 'S0', '']]}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
for i in range(len(input_pins)):
fsm_spec_inout['inputs'].append(('input{}'.format(i),
input_pins[i]))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['0' * len(input_pins), current_state, next_state,
output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
|
162
|
213
|
# Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from random import randint
from math import ceil
import numpy as np
import pytest
from pynq import Overlay
from pynq.tests.util import user_answer_yes
from pynq.lib.logictools.waveform import bitstring_to_int
from pynq.lib.logictools.waveform import wave_to_bitstring
from pynq.lib.logictools import FSMGenerator
from pynq.lib.logictools import ARDUINO
from pynq.lib.logictools import PYNQZ1_LOGICTOOLS_SPECIFICATION
from pynq.lib.logictools import MAX_NUM_TRACE_SAMPLES
from pynq.lib.logictools import FSM_MIN_NUM_STATES
from pynq.lib.logictools import FSM_MAX_NUM_STATES
from pynq.lib.logictools import FSM_MAX_INPUT_BITS
from pynq.lib.logictools import FSM_MAX_STATE_INPUT_BITS
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "pynq_support@xilinx.com"
try:
ol = Overlay('logictools.bit', download=False)
flag0 = True
except IOError:
flag0 = False
flag1 = user_answer_yes("\nTest Finite State Machine (FSM) generator?")
if flag1:
mb_info = ARDUINO
flag = flag0 and flag1
pin_dict = PYNQZ1_LOGICTOOLS_SPECIFICATION['traceable_outputs']
interface_width = PYNQZ1_LOGICTOOLS_SPECIFICATION['interface_width']
def build_fsm_spec_4_state(direction_logic_value):
"""Build an FSM spec with 4 states.
The FSM built has 2 inputs, 1 output, and 4 states. It acts like a
2-bit counter, where the output goes to high only if the FSM is in the
final state.
When the direction pin is low, the counter counts up; if it is high, the
counter counts down.
Parameters
----------
direction_logic_value : int
The logic value of the direction pin.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output pattern corresponding to the direction value.
list
The state bit0 pattern corresponding to the direction value.
list
The state bit1 pattern corresponding to the direction value.
"""
out, rst, direction = list(pin_dict.keys())[0:3]
fsm_spec_4_state = {'inputs': [('rst', rst), ('direction', direction)],
'outputs': [('test', out)],
'states': ['S0', 'S1', 'S2', 'S3'],
'transitions': [['00', 'S0', 'S1', '0'],
['01', 'S0', 'S3', '0'],
['00', 'S1', 'S2', '0'],
['01', 'S1', 'S0', '0'],
['00', 'S2', 'S3', '0'],
['01', 'S2', 'S1', '0'],
['00', 'S3', 'S0', '1'],
['01', 'S3', 'S2', '1'],
['1-', '*', 'S0', '']]}
if not direction_logic_value:
output_pattern = [0, 0, 0, 1]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 0, 1, 1]
else:
output_pattern = [0, 1, 0, 0]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 1, 1, 0]
return fsm_spec_4_state, \
output_pattern, state_bit0_pattern, state_bit1_pattern
def build_fsm_spec_random(num_states):
"""Build an FSM spec with the specified number of states.
    The FSM spec uses only a single input and a single output. As a
    by-product, a list of output patterns is also returned.
Parameters
----------
num_states : int
The number of states of the FSM.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin, output_pin = list(pin_dict.keys())[0:2]
if num_states == 1:
return {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': ['S0'],
'transitions': [['1', '*', 'S0', '']]}, None
else:
fsm_spec_state = {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': [],
'transitions': [['1', '*', 'S0', '']]}
output_pattern_list = list()
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i+1) % num_states)
fsm_spec_state['states'] += [current_state]
output_pattern = '{}'.format(randint(0, 1))
transition = ['0', current_state, next_state, output_pattern]
fsm_spec_state['transitions'] += [transition]
output_pattern_list.append(int(output_pattern))
return fsm_spec_state, output_pattern_list
def build_fsm_spec_max_in_out():
"""Build an FSM spec using a maximum number of inputs and outputs.
The returned FSM spec has a maximum number of inputs and
outputs. At the same time, the largest available number of
states will be implemented. For example, on PYNQ-Z1, if
FSM_MAX_INPUT_BITS = 8, and FSM_MAX_STATE_INPUT_BITS = 13, we will
implement 2**(13-8)-1 = 31 states. This is the largest number of states
available for this setup, since there is always 1 dummy state that has
to be reserved.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
output_pins = list(pin_dict.keys())[FSM_MAX_INPUT_BITS:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': [['1' * len(input_pins), '*', 'S0', '']]}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
for i in range(len(input_pins)):
fsm_spec_inout['inputs'].append(('input{}'.format(i),
input_pins[i]))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['0' * len(input_pins), current_state, next_state,
output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
def build_fsm_spec_free_run():
"""Build a spec that results in a free-running FSM.
This will return an FSM spec with no given inputs.
In this case, the FSM is a free running state machine.
A maximum number of states are deployed.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin = list(pin_dict.keys())[0]
output_pins = list(pin_dict.keys())[1:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': []}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = FSM_MAX_NUM_STATES
fsm_spec_inout['inputs'].append(('input0', input_pin))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['-', current_state, next_state, output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_samples():
"""Test for the Finite State Machine Generator class.
In this test, the pattern generated by the FSM will be compared with the
    one specified. We will test a minimum number of samples (one FSM period),
and a maximum number of samples. 10MHz and 100MHz clocks are tested
for each case.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, _, _ = build_fsm_spec_4_state(1)
fsm_period = len(fsm_spec_4_state['states'])
for num_samples in [fsm_period, MAX_NUM_TRACE_SAMPLES]:
test_tile = np.array(output_pattern)
golden_test_array = np.tile(test_tile, ceil(num_samples / 4))
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
assert fsm_generator.status == 'RESET'
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=num_samples)
fsm_generator.setup(fsm_spec_4_state,
frequency_mhz=fsm_frequency_mhz)
assert fsm_generator.status == 'READY'
assert 'bram_data_buf' not in \
fsm_generator.logictools_controller.buffers, \
'bram_data_buf is not freed after use.'
fsm_generator.run()
assert fsm_generator.status == 'RUNNING'
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
assert np.array_equal(test_array,
golden_test_array[:num_samples]), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
assert fsm_generator.status == 'READY'
fsm_generator.reset()
assert fsm_generator.status == 'RESET'
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_state_bits():
"""Test for the Finite State Machine Generator class.
    This test is similar to the first test, but here we test the case
    where the state bits are also used as outputs.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, \
state_bit0_pattern, state_bit1_pattern = build_fsm_spec_4_state(0)
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern)
golden_state_bit0_array = np.array(state_bit0_pattern)
golden_state_bit1_array = np.array(state_bit1_pattern)
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
fsm_generator.run()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_step():
"""Test for the Finite State Machine Generator class.
    This test is similar to the test above, but here we test the
    `step()` method, asking users to change the input logic values in
    the middle of the test.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("")
fsm_spec_4_state, output_pattern_up, \
state_bit0_pattern_up, \
state_bit1_pattern_up = build_fsm_spec_4_state(0)
_, output_pattern_down, \
state_bit0_pattern_down, \
state_bit1_pattern_down = build_fsm_spec_4_state(1)
output_pattern_down.append(output_pattern_down.pop(0))
state_bit0_pattern_down.append(state_bit0_pattern_down.pop(0))
state_bit1_pattern_down.append(state_bit1_pattern_down.pop(0))
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern_up +
output_pattern_down[1:])
golden_state_bit0_array = np.array(state_bit0_pattern_up +
state_bit0_pattern_down[1:])
golden_state_bit1_array = np.array(state_bit1_pattern_up +
state_bit1_pattern_down[1:])
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
print("Connect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_up)-1):
fsm_generator.step()
print("Connect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_down)):
fsm_generator.step()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_no_trace():
"""Test for the Finite State Machine Generator class.
    This is similar to the first test, but here we test the case
    where no analyzer is specified.
"""
ol.download()
fsm_spec_4_state, _, _, _ = build_fsm_spec_4_state(0)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=False)
fsm_generator.setup(fsm_spec_4_state)
fsm_generator.run()
exception_raised = False
try:
fsm_generator.show_waveform()
except ValueError:
exception_raised = True
assert exception_raised, 'Should raise exception for show_waveform().'
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states1():
"""Test for the Finite State Machine Generator class.
    This test will check 1 and (FSM_MAX_NUM_STATES + 1) states.
These cases should raise exceptions. For these tests, we use the minimum
number of input and output pins.
"""
ol.download()
fsm_generator = None
exception_raised = False
fsm_spec_less_than_min_state, _ = build_fsm_spec_random(
FSM_MIN_NUM_STATES - 1)
fsm_spec_more_than_max_state, _ = build_fsm_spec_random(
FSM_MAX_NUM_STATES + 1)
for fsm_spec in [fsm_spec_less_than_min_state,
fsm_spec_more_than_max_state]:
num_states = len(fsm_spec['states'])
        exception_raised = False
        try:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec)
except ValueError:
exception_raised = True
assert exception_raised, \
'Should raise exception when ' \
'there are {} states in the FSM.'.format(num_states)
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states2():
"""Test for the Finite State Machine Generator class.
    This test will check 2 and FSM_MAX_NUM_STATES states.
These cases should be able to pass random tests.
For these tests, we use the minimum number of input and output pins.
"""
ol.download()
input_pin = list(pin_dict.keys())[0]
print("\nConnect {} to GND, and disconnect other pins.".format(input_pin))
input("Hit enter after done ...")
for num_states in [2, FSM_MAX_NUM_STATES]:
fsm_spec, test_pattern = build_fsm_spec_random(num_states)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec, frequency_mhz=100)
fsm_generator.run()
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
period = num_states
test_tile = np.array(test_pattern)
golden_test_array = np.tile(test_tile,
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_array,
golden_test_array[:MAX_NUM_TRACE_SAMPLES]), \
'Analysis not matching the generated pattern.'
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_max_in_out():
"""Test for the Finite State Machine Generator class.
    This test covers the case when the maximum number of inputs and
    outputs is used. At the same time, the largest available number of
states will be implemented.
"""
ol.download()
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
print("\nConnect {} to GND.".format(input_pins))
print("Disconnect all other pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_max_in_out()
period = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
num_output_pins = interface_width - FSM_MAX_INPUT_BITS
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = [[] for _ in range(num_output_pins)]
for i in range(num_output_pins):
golden_arrays[i] = np.tile(test_patterns[i],
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_arrays[i],
golden_arrays[i][:MAX_NUM_TRACE_SAMPLES]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_free_run():
"""Test for the Finite State Machine Generator class.
This will examine a special scenario where no inputs are given.
In this case, the FSM is a free running state machine. Since the FSM
specification requires at least 1 input pin to be specified, 1 pin can
be used as `don't care` input, while all the other pins are used as
outputs. A maximum number of states are deployed.
"""
ol.download()
print("\nDisconnect all the pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_free_run()
period = FSM_MAX_NUM_STATES
num_output_pins = interface_width - 1
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=period)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = test_patterns
for i in range(num_output_pins):
assert np.array_equal(test_arrays[i], golden_arrays[i]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
|
build_fsm_spec_free_run
|
Build a spec that results in a free-running FSM.
This will return an FSM spec with no given inputs.
In this case, the FSM is a free-running state machine.
A maximum number of states are deployed.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
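For illustration, a minimal free-running spec of the same shape might look like
the following (a sketch only: the pin names 'D0' and 'D1' are hypothetical, and
the real builder uses FSM_MAX_NUM_STATES states with random output bits):

    fsm_spec = {'inputs': [('input0', 'D0')],    # single don't-care input
                'outputs': [('output0', 'D1')],
                'states': ['S0', 'S1'],
                'transitions': [['-', 'S0', 'S1', '1'],  # '-' matches anything
                                ['-', 'S1', 'S0', '0']]}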
|
# Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from random import randint
from math import ceil
import numpy as np
import pytest
from pynq import Overlay
from pynq.tests.util import user_answer_yes
from pynq.lib.logictools.waveform import bitstring_to_int
from pynq.lib.logictools.waveform import wave_to_bitstring
from pynq.lib.logictools import FSMGenerator
from pynq.lib.logictools import ARDUINO
from pynq.lib.logictools import PYNQZ1_LOGICTOOLS_SPECIFICATION
from pynq.lib.logictools import MAX_NUM_TRACE_SAMPLES
from pynq.lib.logictools import FSM_MIN_NUM_STATES
from pynq.lib.logictools import FSM_MAX_NUM_STATES
from pynq.lib.logictools import FSM_MAX_INPUT_BITS
from pynq.lib.logictools import FSM_MAX_STATE_INPUT_BITS
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "pynq_support@xilinx.com"
try:
ol = Overlay('logictools.bit', download=False)
flag0 = True
except IOError:
flag0 = False
flag1 = user_answer_yes("\nTest Finite State Machine (FSM) generator?")
if flag1:
mb_info = ARDUINO
flag = flag0 and flag1
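# `flag` gates every test below: the logictools overlay must be present
# (flag0) and the user must opt in interactively (flag1).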
pin_dict = PYNQZ1_LOGICTOOLS_SPECIFICATION['traceable_outputs']
interface_width = PYNQZ1_LOGICTOOLS_SPECIFICATION['interface_width']
def build_fsm_spec_4_state(direction_logic_value):
"""Build an FSM spec with 4 states.
The FSM built has 2 inputs, 1 output, and 4 states. It acts like a
    2-bit counter, where the output goes high only if the FSM is in the
final state.
When the direction pin is low, the counter counts up; if it is high, the
counter counts down.
Parameters
----------
direction_logic_value : int
The logic value of the direction pin.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output pattern corresponding to the direction value.
list
The state bit0 pattern corresponding to the direction value.
list
The state bit1 pattern corresponding to the direction value.
"""
out, rst, direction = list(pin_dict.keys())[0:3]
fsm_spec_4_state = {'inputs': [('rst', rst), ('direction', direction)],
'outputs': [('test', out)],
'states': ['S0', 'S1', 'S2', 'S3'],
'transitions': [['00', 'S0', 'S1', '0'],
['01', 'S0', 'S3', '0'],
['00', 'S1', 'S2', '0'],
['01', 'S1', 'S0', '0'],
['00', 'S2', 'S3', '0'],
['01', 'S2', 'S1', '0'],
['00', 'S3', 'S0', '1'],
['01', 'S3', 'S2', '1'],
['1-', '*', 'S0', '']]}
if not direction_logic_value:
output_pattern = [0, 0, 0, 1]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 0, 1, 1]
else:
output_pattern = [0, 1, 0, 0]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 1, 1, 0]
return fsm_spec_4_state, \
output_pattern, state_bit0_pattern, state_bit1_pattern
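# A minimal usage sketch for this builder (an illustration only; it assumes
# the board is wired as the tests below instruct):
#     spec, out_pat, bit0_pat, bit1_pat = build_fsm_spec_4_state(0)
#     generator = FSMGenerator(mb_info)
#     generator.setup(spec, use_state_bits=True, frequency_mhz=10)
#     generator.run()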
def build_fsm_spec_random(num_states):
"""Build an FSM spec with the specified number of states.
    The FSM spec uses only a single input and a single output. As a side
    product, a list of output patterns is also returned.
Parameters
----------
num_states : int
The number of states of the FSM.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin, output_pin = list(pin_dict.keys())[0:2]
if num_states == 1:
return {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': ['S0'],
'transitions': [['1', '*', 'S0', '']]}, None
else:
fsm_spec_state = {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': [],
'transitions': [['1', '*', 'S0', '']]}
output_pattern_list = list()
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i+1) % num_states)
fsm_spec_state['states'] += [current_state]
output_pattern = '{}'.format(randint(0, 1))
transition = ['0', current_state, next_state, output_pattern]
fsm_spec_state['transitions'] += [transition]
output_pattern_list.append(int(output_pattern))
return fsm_spec_state, output_pattern_list
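# Sketch of how the tests below consume the returned pattern: the FSM repeats
# it every num_states samples, so the golden trace for n captured samples is
#     np.tile(np.array(pattern), ceil(n / num_states))[:n]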
def build_fsm_spec_max_in_out():
"""Build an FSM spec using a maximum number of inputs and outputs.
The returned FSM spec has a maximum number of inputs and
outputs. At the same time, the largest available number of
states will be implemented. For example, on PYNQ-Z1, if
FSM_MAX_INPUT_BITS = 8, and FSM_MAX_STATE_INPUT_BITS = 13, we will
implement 2**(13-8)-1 = 31 states. This is the largest number of states
available for this setup, since there is always 1 dummy state that has
to be reserved.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
output_pins = list(pin_dict.keys())[FSM_MAX_INPUT_BITS:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': [['1' * len(input_pins), '*', 'S0', '']]}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
for i in range(len(input_pins)):
fsm_spec_inout['inputs'].append(('input{}'.format(i),
input_pins[i]))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['0' * len(input_pins), current_state, next_state,
output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
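# Worked example of the state count above, using the PYNQ-Z1 values quoted in
# the docstring: 2 ** (13 - 8) - 1 = 2 ** 5 - 1 = 31 usable states, with 1
# dummy state reserved.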
# MASKED: build_fsm_spec_free_run function (lines 216-261)
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_samples():
"""Test for the Finite State Machine Generator class.
In this test, the pattern generated by the FSM will be compared with the
    one specified. We will test a minimum number of samples (one FSM period),
and a maximum number of samples. 10MHz and 100MHz clocks are tested
for each case.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, _, _ = build_fsm_spec_4_state(1)
fsm_period = len(fsm_spec_4_state['states'])
for num_samples in [fsm_period, MAX_NUM_TRACE_SAMPLES]:
test_tile = np.array(output_pattern)
golden_test_array = np.tile(test_tile, ceil(num_samples / 4))
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
assert fsm_generator.status == 'RESET'
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=num_samples)
fsm_generator.setup(fsm_spec_4_state,
frequency_mhz=fsm_frequency_mhz)
assert fsm_generator.status == 'READY'
assert 'bram_data_buf' not in \
fsm_generator.logictools_controller.buffers, \
'bram_data_buf is not freed after use.'
fsm_generator.run()
assert fsm_generator.status == 'RUNNING'
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
assert np.array_equal(test_array,
golden_test_array[:num_samples]), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
assert fsm_generator.status == 'READY'
fsm_generator.reset()
assert fsm_generator.status == 'RESET'
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_state_bits():
"""Test for the Finite State Machine Generator class.
This test is similar to the first test, but in this test,
we will test the case when the state bits are also used as outputs.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, \
state_bit0_pattern, state_bit1_pattern = build_fsm_spec_4_state(0)
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern)
golden_state_bit0_array = np.array(state_bit0_pattern)
golden_state_bit1_array = np.array(state_bit1_pattern)
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
fsm_generator.run()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_step():
"""Test for the Finite State Machine Generator class.
This test is similar to the above test, but in this test,
we will test the `step()` method, and ask users to change the input
logic values in the middle of the test.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("")
fsm_spec_4_state, output_pattern_up, \
state_bit0_pattern_up, \
state_bit1_pattern_up = build_fsm_spec_4_state(0)
_, output_pattern_down, \
state_bit0_pattern_down, \
state_bit1_pattern_down = build_fsm_spec_4_state(1)
output_pattern_down.append(output_pattern_down.pop(0))
state_bit0_pattern_down.append(state_bit0_pattern_down.pop(0))
state_bit1_pattern_down.append(state_bit1_pattern_down.pop(0))
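    # The three pop/append calls above rotate each down-counting pattern left
    # by one element, so that the golden arrays built below line up with the
    # samples captured after the direction pin is switched mid-test.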
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern_up +
output_pattern_down[1:])
golden_state_bit0_array = np.array(state_bit0_pattern_up +
state_bit0_pattern_down[1:])
golden_state_bit1_array = np.array(state_bit1_pattern_up +
state_bit1_pattern_down[1:])
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
print("Connect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_up)-1):
fsm_generator.step()
print("Connect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_down)):
fsm_generator.step()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_no_trace():
"""Test for the Finite State Machine Generator class.
This is similar to the first test, but in this test,
we will test the case when no analyzer is specified.
"""
ol.download()
fsm_spec_4_state, _, _, _ = build_fsm_spec_4_state(0)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=False)
fsm_generator.setup(fsm_spec_4_state)
fsm_generator.run()
exception_raised = False
try:
fsm_generator.show_waveform()
except ValueError:
exception_raised = True
assert exception_raised, 'Should raise exception for show_waveform().'
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states1():
"""Test for the Finite State Machine Generator class.
    This test will check (FSM_MIN_NUM_STATES - 1) states and
    (FSM_MAX_NUM_STATES + 1) states.
These cases should raise exceptions. For these tests, we use the minimum
number of input and output pins.
"""
ol.download()
fsm_generator = None
exception_raised = False
fsm_spec_less_than_min_state, _ = build_fsm_spec_random(
FSM_MIN_NUM_STATES - 1)
fsm_spec_more_than_max_state, _ = build_fsm_spec_random(
FSM_MAX_NUM_STATES + 1)
for fsm_spec in [fsm_spec_less_than_min_state,
fsm_spec_more_than_max_state]:
num_states = len(fsm_spec['states'])
try:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec)
except ValueError:
exception_raised = True
assert exception_raised, \
'Should raise exception when ' \
'there are {} states in the FSM.'.format(num_states)
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states2():
"""Test for the Finite State Machine Generator class.
    This test will check 2 and FSM_MAX_NUM_STATES states.
These cases should be able to pass random tests.
For these tests, we use the minimum number of input and output pins.
"""
ol.download()
input_pin = list(pin_dict.keys())[0]
print("\nConnect {} to GND, and disconnect other pins.".format(input_pin))
input("Hit enter after done ...")
for num_states in [2, FSM_MAX_NUM_STATES]:
fsm_spec, test_pattern = build_fsm_spec_random(num_states)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec, frequency_mhz=100)
fsm_generator.run()
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
period = num_states
test_tile = np.array(test_pattern)
golden_test_array = np.tile(test_tile,
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_array,
golden_test_array[:MAX_NUM_TRACE_SAMPLES]), \
'Analysis not matching the generated pattern.'
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_max_in_out():
"""Test for the Finite State Machine Generator class.
    This test will test the case when the maximum numbers of inputs and
    outputs are used. At the same time, the largest available number of
states will be implemented.
"""
ol.download()
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
print("\nConnect {} to GND.".format(input_pins))
print("Disconnect all other pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_max_in_out()
period = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
num_output_pins = interface_width - FSM_MAX_INPUT_BITS
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = [[] for _ in range(num_output_pins)]
for i in range(num_output_pins):
golden_arrays[i] = np.tile(test_patterns[i],
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_arrays[i],
golden_arrays[i][:MAX_NUM_TRACE_SAMPLES]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_free_run():
"""Test for the Finite State Machine Generator class.
This will examine a special scenario where no inputs are given.
    In this case, the FSM is a free-running state machine. Since the FSM
specification requires at least 1 input pin to be specified, 1 pin can
be used as `don't care` input, while all the other pins are used as
outputs. A maximum number of states are deployed.
"""
ol.download()
print("\nDisconnect all the pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_free_run()
period = FSM_MAX_NUM_STATES
num_output_pins = interface_width - 1
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=period)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = test_patterns
for i in range(num_output_pins):
assert np.array_equal(test_arrays[i], golden_arrays[i]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
|
def build_fsm_spec_free_run():
"""Build a spec that results in a free-running FSM.
This will return an FSM spec with no given inputs.
    In this case, the FSM is a free-running state machine.
A maximum number of states are deployed.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin = list(pin_dict.keys())[0]
output_pins = list(pin_dict.keys())[1:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': []}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = FSM_MAX_NUM_STATES
fsm_spec_inout['inputs'].append(('input0', input_pin))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['-', current_state, next_state, output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
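# Note: the '-' condition in every transition is a don't-care match on the
# single input bit, so the FSM advances on every clock cycle regardless of
# the pin value, i.e. it free-runs.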
| 216
| 261
|
# Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from random import randint
from math import ceil
import numpy as np
import pytest
from pynq import Overlay
from pynq.tests.util import user_answer_yes
from pynq.lib.logictools.waveform import bitstring_to_int
from pynq.lib.logictools.waveform import wave_to_bitstring
from pynq.lib.logictools import FSMGenerator
from pynq.lib.logictools import ARDUINO
from pynq.lib.logictools import PYNQZ1_LOGICTOOLS_SPECIFICATION
from pynq.lib.logictools import MAX_NUM_TRACE_SAMPLES
from pynq.lib.logictools import FSM_MIN_NUM_STATES
from pynq.lib.logictools import FSM_MAX_NUM_STATES
from pynq.lib.logictools import FSM_MAX_INPUT_BITS
from pynq.lib.logictools import FSM_MAX_STATE_INPUT_BITS
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "pynq_support@xilinx.com"
try:
ol = Overlay('logictools.bit', download=False)
flag0 = True
except IOError:
flag0 = False
flag1 = user_answer_yes("\nTest Finite State Machine (FSM) generator?")
if flag1:
mb_info = ARDUINO
flag = flag0 and flag1
pin_dict = PYNQZ1_LOGICTOOLS_SPECIFICATION['traceable_outputs']
interface_width = PYNQZ1_LOGICTOOLS_SPECIFICATION['interface_width']
def build_fsm_spec_4_state(direction_logic_value):
"""Build an FSM spec with 4 states.
The FSM built has 2 inputs, 1 output, and 4 states. It acts like a
    2-bit counter, where the output goes high only if the FSM is in the
final state.
When the direction pin is low, the counter counts up; if it is high, the
counter counts down.
Parameters
----------
direction_logic_value : int
The logic value of the direction pin.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output pattern corresponding to the direction value.
list
The state bit0 pattern corresponding to the direction value.
list
The state bit1 pattern corresponding to the direction value.
"""
out, rst, direction = list(pin_dict.keys())[0:3]
fsm_spec_4_state = {'inputs': [('rst', rst), ('direction', direction)],
'outputs': [('test', out)],
'states': ['S0', 'S1', 'S2', 'S3'],
'transitions': [['00', 'S0', 'S1', '0'],
['01', 'S0', 'S3', '0'],
['00', 'S1', 'S2', '0'],
['01', 'S1', 'S0', '0'],
['00', 'S2', 'S3', '0'],
['01', 'S2', 'S1', '0'],
['00', 'S3', 'S0', '1'],
['01', 'S3', 'S2', '1'],
['1-', '*', 'S0', '']]}
if not direction_logic_value:
output_pattern = [0, 0, 0, 1]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 0, 1, 1]
else:
output_pattern = [0, 1, 0, 0]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 1, 1, 0]
return fsm_spec_4_state, \
output_pattern, state_bit0_pattern, state_bit1_pattern
def build_fsm_spec_random(num_states):
"""Build an FSM spec with the specified number of states.
    The FSM spec uses only a single input and a single output. As a side
    product, a list of output patterns is also returned.
Parameters
----------
num_states : int
The number of states of the FSM.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin, output_pin = list(pin_dict.keys())[0:2]
if num_states == 1:
return {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': ['S0'],
'transitions': [['1', '*', 'S0', '']]}, None
else:
fsm_spec_state = {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': [],
'transitions': [['1', '*', 'S0', '']]}
output_pattern_list = list()
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i+1) % num_states)
fsm_spec_state['states'] += [current_state]
output_pattern = '{}'.format(randint(0, 1))
transition = ['0', current_state, next_state, output_pattern]
fsm_spec_state['transitions'] += [transition]
output_pattern_list.append(int(output_pattern))
return fsm_spec_state, output_pattern_list
def build_fsm_spec_max_in_out():
"""Build an FSM spec using a maximum number of inputs and outputs.
The returned FSM spec has a maximum number of inputs and
outputs. At the same time, the largest available number of
states will be implemented. For example, on PYNQ-Z1, if
FSM_MAX_INPUT_BITS = 8, and FSM_MAX_STATE_INPUT_BITS = 13, we will
implement 2**(13-8)-1 = 31 states. This is the largest number of states
available for this setup, since there is always 1 dummy state that has
to be reserved.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
output_pins = list(pin_dict.keys())[FSM_MAX_INPUT_BITS:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': [['1' * len(input_pins), '*', 'S0', '']]}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
for i in range(len(input_pins)):
fsm_spec_inout['inputs'].append(('input{}'.format(i),
input_pins[i]))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['0' * len(input_pins), current_state, next_state,
output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
def build_fsm_spec_free_run():
"""Build a spec that results in a free-running FSM.
This will return an FSM spec with no given inputs.
    In this case, the FSM is a free-running state machine.
A maximum number of states are deployed.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin = list(pin_dict.keys())[0]
output_pins = list(pin_dict.keys())[1:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': []}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = FSM_MAX_NUM_STATES
fsm_spec_inout['inputs'].append(('input0', input_pin))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['-', current_state, next_state, output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_samples():
"""Test for the Finite State Machine Generator class.
In this test, the pattern generated by the FSM will be compared with the
    one specified. We will test a minimum number of samples (one FSM period),
and a maximum number of samples. 10MHz and 100MHz clocks are tested
for each case.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, _, _ = build_fsm_spec_4_state(1)
fsm_period = len(fsm_spec_4_state['states'])
for num_samples in [fsm_period, MAX_NUM_TRACE_SAMPLES]:
test_tile = np.array(output_pattern)
golden_test_array = np.tile(test_tile, ceil(num_samples / 4))
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
assert fsm_generator.status == 'RESET'
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=num_samples)
fsm_generator.setup(fsm_spec_4_state,
frequency_mhz=fsm_frequency_mhz)
assert fsm_generator.status == 'READY'
assert 'bram_data_buf' not in \
fsm_generator.logictools_controller.buffers, \
'bram_data_buf is not freed after use.'
fsm_generator.run()
assert fsm_generator.status == 'RUNNING'
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
assert np.array_equal(test_array,
golden_test_array[:num_samples]), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
assert fsm_generator.status == 'READY'
fsm_generator.reset()
assert fsm_generator.status == 'RESET'
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_state_bits():
"""Test for the Finite State Machine Generator class.
This test is similar to the first test, but in this test,
we will test the case when the state bits are also used as outputs.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, \
state_bit0_pattern, state_bit1_pattern = build_fsm_spec_4_state(0)
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern)
golden_state_bit0_array = np.array(state_bit0_pattern)
golden_state_bit1_array = np.array(state_bit1_pattern)
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
fsm_generator.run()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_step():
"""Test for the Finite State Machine Generator class.
This test is similar to the above test, but in this test,
we will test the `step()` method, and ask users to change the input
logic values in the middle of the test.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("")
fsm_spec_4_state, output_pattern_up, \
state_bit0_pattern_up, \
state_bit1_pattern_up = build_fsm_spec_4_state(0)
_, output_pattern_down, \
state_bit0_pattern_down, \
state_bit1_pattern_down = build_fsm_spec_4_state(1)
output_pattern_down.append(output_pattern_down.pop(0))
state_bit0_pattern_down.append(state_bit0_pattern_down.pop(0))
state_bit1_pattern_down.append(state_bit1_pattern_down.pop(0))
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern_up +
output_pattern_down[1:])
golden_state_bit0_array = np.array(state_bit0_pattern_up +
state_bit0_pattern_down[1:])
golden_state_bit1_array = np.array(state_bit1_pattern_up +
state_bit1_pattern_down[1:])
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
print("Connect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_up)-1):
fsm_generator.step()
print("Connect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_down)):
fsm_generator.step()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_no_trace():
"""Test for the Finite State Machine Generator class.
This is similar to the first test, but in this test,
we will test the case when no analyzer is specified.
"""
ol.download()
fsm_spec_4_state, _, _, _ = build_fsm_spec_4_state(0)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=False)
fsm_generator.setup(fsm_spec_4_state)
fsm_generator.run()
exception_raised = False
try:
fsm_generator.show_waveform()
except ValueError:
exception_raised = True
assert exception_raised, 'Should raise exception for show_waveform().'
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states1():
"""Test for the Finite State Machine Generator class.
    This test will check (FSM_MIN_NUM_STATES - 1) states and
    (FSM_MAX_NUM_STATES + 1) states.
These cases should raise exceptions. For these tests, we use the minimum
number of input and output pins.
"""
ol.download()
fsm_generator = None
exception_raised = False
fsm_spec_less_than_min_state, _ = build_fsm_spec_random(
FSM_MIN_NUM_STATES - 1)
fsm_spec_more_than_max_state, _ = build_fsm_spec_random(
FSM_MAX_NUM_STATES + 1)
for fsm_spec in [fsm_spec_less_than_min_state,
fsm_spec_more_than_max_state]:
num_states = len(fsm_spec['states'])
try:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec)
except ValueError:
exception_raised = True
assert exception_raised, \
'Should raise exception when ' \
'there are {} states in the FSM.'.format(num_states)
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states2():
"""Test for the Finite State Machine Generator class.
    This test will check 2 and FSM_MAX_NUM_STATES states.
These cases should be able to pass random tests.
For these tests, we use the minimum number of input and output pins.
"""
ol.download()
input_pin = list(pin_dict.keys())[0]
print("\nConnect {} to GND, and disconnect other pins.".format(input_pin))
input("Hit enter after done ...")
for num_states in [2, FSM_MAX_NUM_STATES]:
fsm_spec, test_pattern = build_fsm_spec_random(num_states)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec, frequency_mhz=100)
fsm_generator.run()
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
period = num_states
test_tile = np.array(test_pattern)
golden_test_array = np.tile(test_tile,
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_array,
golden_test_array[:MAX_NUM_TRACE_SAMPLES]), \
'Analysis not matching the generated pattern.'
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_max_in_out():
"""Test for the Finite State Machine Generator class.
    This test will test the case when the maximum numbers of inputs and
    outputs are used. At the same time, the largest available number of
states will be implemented.
"""
ol.download()
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
print("\nConnect {} to GND.".format(input_pins))
print("Disconnect all other pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_max_in_out()
period = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
num_output_pins = interface_width - FSM_MAX_INPUT_BITS
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = [[] for _ in range(num_output_pins)]
for i in range(num_output_pins):
golden_arrays[i] = np.tile(test_patterns[i],
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_arrays[i],
golden_arrays[i][:MAX_NUM_TRACE_SAMPLES]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_free_run():
"""Test for the Finite State Machine Generator class.
This will examine a special scenario where no inputs are given.
    In this case, the FSM is a free-running state machine. Since the FSM
specification requires at least 1 input pin to be specified, 1 pin can
be used as `don't care` input, while all the other pins are used as
outputs. A maximum number of states are deployed.
"""
ol.download()
print("\nDisconnect all the pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_free_run()
period = FSM_MAX_NUM_STATES
num_output_pins = interface_width - 1
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=period)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = test_patterns
for i in range(num_output_pins):
assert np.array_equal(test_arrays[i], golden_arrays[i]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
|
test_fsm_num_samples
|
Test for the Finite State Machine Generator class.
In this test, the pattern generated by the FSM will be compared with the
one specified. We will test a minimum number of samples (one FSM period),
and a maximum number of samples. 10MHz and 100MHz clocks are tested
for each case.
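The core of the comparison, sketched with the names used in the test body
below (the tiled golden array is truncated to the captured length):

    test_tile = np.array(output_pattern)                  # one FSM period
    golden_test_array = np.tile(test_tile, ceil(num_samples / 4))
    assert np.array_equal(test_array, golden_test_array[:num_samples])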
|
# Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from random import randint
from math import ceil
import numpy as np
import pytest
from pynq import Overlay
from pynq.tests.util import user_answer_yes
from pynq.lib.logictools.waveform import bitstring_to_int
from pynq.lib.logictools.waveform import wave_to_bitstring
from pynq.lib.logictools import FSMGenerator
from pynq.lib.logictools import ARDUINO
from pynq.lib.logictools import PYNQZ1_LOGICTOOLS_SPECIFICATION
from pynq.lib.logictools import MAX_NUM_TRACE_SAMPLES
from pynq.lib.logictools import FSM_MIN_NUM_STATES
from pynq.lib.logictools import FSM_MAX_NUM_STATES
from pynq.lib.logictools import FSM_MAX_INPUT_BITS
from pynq.lib.logictools import FSM_MAX_STATE_INPUT_BITS
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "pynq_support@xilinx.com"
try:
ol = Overlay('logictools.bit', download=False)
flag0 = True
except IOError:
flag0 = False
flag1 = user_answer_yes("\nTest Finite State Machine (FSM) generator?")
if flag1:
mb_info = ARDUINO
flag = flag0 and flag1
pin_dict = PYNQZ1_LOGICTOOLS_SPECIFICATION['traceable_outputs']
interface_width = PYNQZ1_LOGICTOOLS_SPECIFICATION['interface_width']
def build_fsm_spec_4_state(direction_logic_value):
"""Build an FSM spec with 4 states.
The FSM built has 2 inputs, 1 output, and 4 states. It acts like a
    2-bit counter, where the output goes high only if the FSM is in the
final state.
When the direction pin is low, the counter counts up; if it is high, the
counter counts down.
Parameters
----------
direction_logic_value : int
The logic value of the direction pin.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output pattern corresponding to the direction value.
list
The state bit0 pattern corresponding to the direction value.
list
The state bit1 pattern corresponding to the direction value.
"""
out, rst, direction = list(pin_dict.keys())[0:3]
fsm_spec_4_state = {'inputs': [('rst', rst), ('direction', direction)],
'outputs': [('test', out)],
'states': ['S0', 'S1', 'S2', 'S3'],
'transitions': [['00', 'S0', 'S1', '0'],
['01', 'S0', 'S3', '0'],
['00', 'S1', 'S2', '0'],
['01', 'S1', 'S0', '0'],
['00', 'S2', 'S3', '0'],
['01', 'S2', 'S1', '0'],
['00', 'S3', 'S0', '1'],
['01', 'S3', 'S2', '1'],
['1-', '*', 'S0', '']]}
if not direction_logic_value:
output_pattern = [0, 0, 0, 1]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 0, 1, 1]
else:
output_pattern = [0, 1, 0, 0]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 1, 1, 0]
return fsm_spec_4_state, \
output_pattern, state_bit0_pattern, state_bit1_pattern
def build_fsm_spec_random(num_states):
"""Build an FSM spec with the specified number of states.
    The FSM spec uses only a single input and a single output. As a side
    product, a list of output patterns is also returned.
Parameters
----------
num_states : int
The number of states of the FSM.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin, output_pin = list(pin_dict.keys())[0:2]
if num_states == 1:
return {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': ['S0'],
'transitions': [['1', '*', 'S0', '']]}, None
else:
fsm_spec_state = {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': [],
'transitions': [['1', '*', 'S0', '']]}
output_pattern_list = list()
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i+1) % num_states)
fsm_spec_state['states'] += [current_state]
output_pattern = '{}'.format(randint(0, 1))
transition = ['0', current_state, next_state, output_pattern]
fsm_spec_state['transitions'] += [transition]
output_pattern_list.append(int(output_pattern))
return fsm_spec_state, output_pattern_list
def build_fsm_spec_max_in_out():
"""Build an FSM spec using a maximum number of inputs and outputs.
The returned FSM spec has a maximum number of inputs and
outputs. At the same time, the largest available number of
states will be implemented. For example, on PYNQ-Z1, if
FSM_MAX_INPUT_BITS = 8, and FSM_MAX_STATE_INPUT_BITS = 13, we will
implement 2**(13-8)-1 = 31 states. This is the largest number of states
available for this setup, since there is always 1 dummy state that has
to be reserved.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
output_pins = list(pin_dict.keys())[FSM_MAX_INPUT_BITS:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': [['1' * len(input_pins), '*', 'S0', '']]}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
for i in range(len(input_pins)):
fsm_spec_inout['inputs'].append(('input{}'.format(i),
input_pins[i]))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['0' * len(input_pins), current_state, next_state,
output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
def build_fsm_spec_free_run():
"""Build a spec that results in a free-running FSM.
This will return an FSM spec with no given inputs.
    In this case, the FSM is a free-running state machine.
A maximum number of states are deployed.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin = list(pin_dict.keys())[0]
output_pins = list(pin_dict.keys())[1:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': []}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = FSM_MAX_NUM_STATES
fsm_spec_inout['inputs'].append(('input0', input_pin))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['-', current_state, next_state, output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
# MASKED: test_fsm_num_samples function (lines 264-322)
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_state_bits():
"""Test for the Finite State Machine Generator class.
This test is similar to the first test, but in this test,
we will test the case when the state bits are also used as outputs.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, \
state_bit0_pattern, state_bit1_pattern = build_fsm_spec_4_state(0)
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern)
golden_state_bit0_array = np.array(state_bit0_pattern)
golden_state_bit1_array = np.array(state_bit1_pattern)
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
fsm_generator.run()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_step():
"""Test for the Finite State Machine Generator class.
This test is similar to the above test, but in this test,
we will test the `step()` method, and ask users to change the input
logic values in the middle of the test.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("")
fsm_spec_4_state, output_pattern_up, \
state_bit0_pattern_up, \
state_bit1_pattern_up = build_fsm_spec_4_state(0)
_, output_pattern_down, \
state_bit0_pattern_down, \
state_bit1_pattern_down = build_fsm_spec_4_state(1)
output_pattern_down.append(output_pattern_down.pop(0))
state_bit0_pattern_down.append(state_bit0_pattern_down.pop(0))
state_bit1_pattern_down.append(state_bit1_pattern_down.pop(0))
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern_up +
output_pattern_down[1:])
golden_state_bit0_array = np.array(state_bit0_pattern_up +
state_bit0_pattern_down[1:])
golden_state_bit1_array = np.array(state_bit1_pattern_up +
state_bit1_pattern_down[1:])
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
print("Connect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_up)-1):
fsm_generator.step()
print("Connect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_down)):
fsm_generator.step()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_no_trace():
"""Test for the Finite State Machine Generator class.
This is similar to the first test, but in this test,
we will test the case when no analyzer is specified.
"""
ol.download()
fsm_spec_4_state, _, _, _ = build_fsm_spec_4_state(0)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=False)
fsm_generator.setup(fsm_spec_4_state)
fsm_generator.run()
exception_raised = False
try:
fsm_generator.show_waveform()
except ValueError:
exception_raised = True
assert exception_raised, 'Should raise exception for show_waveform().'
fsm_generator.reset()
del fsm_generator
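# Note (illustrative, not part of the original suite): the flag-based
# exception check above could equivalently use pytest.raises, which both
# asserts that the exception occurs and scopes the check tightly:
#
#     with pytest.raises(ValueError):
#         fsm_generator.show_waveform()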
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states1():
"""Test for the Finite State Machine Generator class.
The 4th test will check 1 and (MAX_NUM_STATES + 1) states.
These cases should raise exceptions. For these tests, we use the minimum
number of input and output pins.
"""
ol.download()
fsm_generator = None
    fsm_spec_less_than_min_state, _ = build_fsm_spec_random(
        FSM_MIN_NUM_STATES - 1)
    fsm_spec_more_than_max_state, _ = build_fsm_spec_random(
        FSM_MAX_NUM_STATES + 1)
    for fsm_spec in [fsm_spec_less_than_min_state,
                     fsm_spec_more_than_max_state]:
        num_states = len(fsm_spec['states'])
        # Reset per iteration so each invalid spec is verified independently.
        exception_raised = False
        try:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec)
except ValueError:
exception_raised = True
assert exception_raised, \
'Should raise exception when ' \
'there are {} states in the FSM.'.format(num_states)
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states2():
"""Test for the Finite State Machine Generator class.
This test will check 2 and MAX_NUM_STATES states.
These cases should be able to pass random tests.
For these tests, we use the minimum number of input and output pins.
"""
ol.download()
input_pin = list(pin_dict.keys())[0]
print("\nConnect {} to GND, and disconnect other pins.".format(input_pin))
input("Hit enter after done ...")
for num_states in [2, FSM_MAX_NUM_STATES]:
fsm_spec, test_pattern = build_fsm_spec_random(num_states)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec, frequency_mhz=100)
fsm_generator.run()
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
period = num_states
test_tile = np.array(test_pattern)
golden_test_array = np.tile(test_tile,
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_array,
golden_test_array[:MAX_NUM_TRACE_SAMPLES]), \
'Analysis not matching the generated pattern.'
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_max_in_out():
"""Test for the Finite State Machine Generator class.
    This test will test the case when the maximum number of inputs and
outputs are used. At the same time, the largest available number of
states will be implemented.
"""
ol.download()
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
print("\nConnect {} to GND.".format(input_pins))
print("Disconnect all other pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_max_in_out()
period = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
num_output_pins = interface_width - FSM_MAX_INPUT_BITS
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = [[] for _ in range(num_output_pins)]
for i in range(num_output_pins):
golden_arrays[i] = np.tile(test_patterns[i],
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_arrays[i],
golden_arrays[i][:MAX_NUM_TRACE_SAMPLES]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_free_run():
"""Test for the Finite State Machine Generator class.
This will examine a special scenario where no inputs are given.
    In this case, the FSM is a free-running state machine. Since the FSM
    specification requires at least 1 input pin to be specified, 1 pin can
    be used as a `don't care` input, while all the other pins are used as
    outputs. The maximum number of states is deployed.
"""
ol.download()
print("\nDisconnect all the pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_free_run()
period = FSM_MAX_NUM_STATES
num_output_pins = interface_width - 1
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=period)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = test_patterns
for i in range(num_output_pins):
assert np.array_equal(test_arrays[i], golden_arrays[i]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
|
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_samples():
"""Test for the Finite State Machine Generator class.
In this test, the pattern generated by the FSM will be compared with the
    one specified. We will test a minimum number of samples (one FSM period),
and a maximum number of samples. 10MHz and 100MHz clocks are tested
for each case.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, _, _ = build_fsm_spec_4_state(1)
fsm_period = len(fsm_spec_4_state['states'])
for num_samples in [fsm_period, MAX_NUM_TRACE_SAMPLES]:
test_tile = np.array(output_pattern)
golden_test_array = np.tile(test_tile, ceil(num_samples / 4))
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
assert fsm_generator.status == 'RESET'
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=num_samples)
fsm_generator.setup(fsm_spec_4_state,
frequency_mhz=fsm_frequency_mhz)
assert fsm_generator.status == 'READY'
assert 'bram_data_buf' not in \
fsm_generator.logictools_controller.buffers, \
'bram_data_buf is not freed after use.'
fsm_generator.run()
assert fsm_generator.status == 'RUNNING'
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
assert np.array_equal(test_array,
golden_test_array[:num_samples]), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
assert fsm_generator.status == 'READY'
fsm_generator.reset()
assert fsm_generator.status == 'RESET'
del fsm_generator
| 264
| 322
|
# Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from random import randint
from math import ceil
import numpy as np
import pytest
from pynq import Overlay
from pynq.tests.util import user_answer_yes
from pynq.lib.logictools.waveform import bitstring_to_int
from pynq.lib.logictools.waveform import wave_to_bitstring
from pynq.lib.logictools import FSMGenerator
from pynq.lib.logictools import ARDUINO
from pynq.lib.logictools import PYNQZ1_LOGICTOOLS_SPECIFICATION
from pynq.lib.logictools import MAX_NUM_TRACE_SAMPLES
from pynq.lib.logictools import FSM_MIN_NUM_STATES
from pynq.lib.logictools import FSM_MAX_NUM_STATES
from pynq.lib.logictools import FSM_MAX_INPUT_BITS
from pynq.lib.logictools import FSM_MAX_STATE_INPUT_BITS
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "pynq_support@xilinx.com"
try:
ol = Overlay('logictools.bit', download=False)
flag0 = True
except IOError:
flag0 = False
flag1 = user_answer_yes("\nTest Finite State Machine (FSM) generator?")
if flag1:
mb_info = ARDUINO
flag = flag0 and flag1
pin_dict = PYNQZ1_LOGICTOOLS_SPECIFICATION['traceable_outputs']
interface_width = PYNQZ1_LOGICTOOLS_SPECIFICATION['interface_width']
def build_fsm_spec_4_state(direction_logic_value):
"""Build an FSM spec with 4 states.
The FSM built has 2 inputs, 1 output, and 4 states. It acts like a
    2-bit counter, where the output goes high only when the FSM is in the
final state.
When the direction pin is low, the counter counts up; if it is high, the
counter counts down.
Parameters
----------
direction_logic_value : int
The logic value of the direction pin.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output pattern corresponding to the direction value.
list
The state bit0 pattern corresponding to the direction value.
list
The state bit1 pattern corresponding to the direction value.
"""
out, rst, direction = list(pin_dict.keys())[0:3]
fsm_spec_4_state = {'inputs': [('rst', rst), ('direction', direction)],
'outputs': [('test', out)],
'states': ['S0', 'S1', 'S2', 'S3'],
'transitions': [['00', 'S0', 'S1', '0'],
['01', 'S0', 'S3', '0'],
['00', 'S1', 'S2', '0'],
['01', 'S1', 'S0', '0'],
['00', 'S2', 'S3', '0'],
['01', 'S2', 'S1', '0'],
['00', 'S3', 'S0', '1'],
['01', 'S3', 'S2', '1'],
['1-', '*', 'S0', '']]}
if not direction_logic_value:
output_pattern = [0, 0, 0, 1]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 0, 1, 1]
else:
output_pattern = [0, 1, 0, 0]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 1, 1, 0]
return fsm_spec_4_state, \
output_pattern, state_bit0_pattern, state_bit1_pattern
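# Illustrative sketch (not part of the original suite): a pure-Python walk
# through the spec above, relying only on the transition format shown in
# the dict, i.e. [input_bits, current_state, next_state, output_bit]. With
# rst=0 and direction=0 (input '00') the FSM counts up and reproduces the
# golden output pattern [0, 0, 0, 1]. Call it manually to check the spec.
def _simulate_4_state_up_counter():
    spec, expected, _, _ = build_fsm_spec_4_state(0)
    state, outputs = 'S0', []
    for _ in spec['states']:
        for transition in spec['transitions']:
            input_bits, current_state, next_state, output = transition
            if input_bits == '00' and current_state == state:
                outputs.append(int(output))
                state = next_state
                break
    assert outputs == expected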
def build_fsm_spec_random(num_states):
"""Build an FSM spec with the specified number of states.
    The FSM spec uses only a single input and a single output. As a side
    product, a list of output patterns is also returned.
Parameters
----------
num_states : int
The number of states of the FSM.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin, output_pin = list(pin_dict.keys())[0:2]
if num_states == 1:
return {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': ['S0'],
'transitions': [['1', '*', 'S0', '']]}, None
else:
fsm_spec_state = {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': [],
'transitions': [['1', '*', 'S0', '']]}
output_pattern_list = list()
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i+1) % num_states)
fsm_spec_state['states'] += [current_state]
output_pattern = '{}'.format(randint(0, 1))
transition = ['0', current_state, next_state, output_pattern]
fsm_spec_state['transitions'] += [transition]
output_pattern_list.append(int(output_pattern))
return fsm_spec_state, output_pattern_list
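# Example (illustrative): for num_states=2 the builder yields a 2-state ring.
# If the random output bits happened to be [1, 0], the transition list would
# read (format: [input, current_state, next_state, output]):
#     ['1', '*',  'S0', '']    # reset arc back to S0
#     ['0', 'S0', 'S1', '1']
#     ['0', 'S1', 'S0', '0']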
def build_fsm_spec_max_in_out():
"""Build an FSM spec using a maximum number of inputs and outputs.
The returned FSM spec has a maximum number of inputs and
outputs. At the same time, the largest available number of
states will be implemented. For example, on PYNQ-Z1, if
FSM_MAX_INPUT_BITS = 8, and FSM_MAX_STATE_INPUT_BITS = 13, we will
implement 2**(13-8)-1 = 31 states. This is the largest number of states
available for this setup, since there is always 1 dummy state that has
to be reserved.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
output_pins = list(pin_dict.keys())[FSM_MAX_INPUT_BITS:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': [['1' * len(input_pins), '*', 'S0', '']]}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
for i in range(len(input_pins)):
fsm_spec_inout['inputs'].append(('input{}'.format(i),
input_pins[i]))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['0' * len(input_pins), current_state, next_state,
output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
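# Illustrative helper (not part of the original suite): the state count used
# above comes from the remaining bit budget. FSM_MAX_STATE_INPUT_BITS bits
# address the transition memory, FSM_MAX_INPUT_BITS of them are taken by the
# external inputs, and one encoding is reserved for the dummy state; with the
# PYNQ-Z1 figures quoted in the docstring this is 2 ** (13 - 8) - 1 = 31.
def _max_usable_states():
    return 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1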
def build_fsm_spec_free_run():
"""Build a spec that results in a free-running FSM.
This will return an FSM spec with no given inputs.
    In this case, the FSM is a free-running state machine.
    The maximum number of states is deployed.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin = list(pin_dict.keys())[0]
output_pins = list(pin_dict.keys())[1:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': []}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = FSM_MAX_NUM_STATES
fsm_spec_inout['inputs'].append(('input0', input_pin))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['-', current_state, next_state, output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
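# Note (illustrative): the '-' input bit in every transition above acts as a
# wildcard, matching any value on the single `don't care` input pin; every
# transition therefore fires unconditionally and the FSM advances through
# the ring of states on each clock cycle, i.e. it free-runs.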
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_samples():
"""Test for the Finite State Machine Generator class.
In this test, the pattern generated by the FSM will be compared with the
    one specified. We will test a minimum number of samples (one FSM period),
and a maximum number of samples. 10MHz and 100MHz clocks are tested
for each case.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, _, _ = build_fsm_spec_4_state(1)
fsm_period = len(fsm_spec_4_state['states'])
for num_samples in [fsm_period, MAX_NUM_TRACE_SAMPLES]:
test_tile = np.array(output_pattern)
golden_test_array = np.tile(test_tile, ceil(num_samples / 4))
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
assert fsm_generator.status == 'RESET'
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=num_samples)
fsm_generator.setup(fsm_spec_4_state,
frequency_mhz=fsm_frequency_mhz)
assert fsm_generator.status == 'READY'
assert 'bram_data_buf' not in \
fsm_generator.logictools_controller.buffers, \
'bram_data_buf is not freed after use.'
fsm_generator.run()
assert fsm_generator.status == 'RUNNING'
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
assert np.array_equal(test_array,
golden_test_array[:num_samples]), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
assert fsm_generator.status == 'READY'
fsm_generator.reset()
assert fsm_generator.status == 'RESET'
del fsm_generator
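# Illustrative sketch (not part of the original suite): how the golden array
# above is assembled. The periodic FSM output pattern is tiled until it
# covers at least num_samples entries, then truncated, so a capture of any
# length can be compared against the expected periodic output.
def _golden_array(pattern, num_samples):
    reps = ceil(num_samples / len(pattern))
    return np.tile(np.array(pattern), reps)[:num_samples]
# e.g. _golden_array([0, 1, 0, 0], 6) -> array([0, 1, 0, 0, 0, 1])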
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_state_bits():
"""Test for the Finite State Machine Generator class.
This test is similar to the first test, but in this test,
we will test the case when the state bits are also used as outputs.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, \
state_bit0_pattern, state_bit1_pattern = build_fsm_spec_4_state(0)
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern)
golden_state_bit0_array = np.array(state_bit0_pattern)
golden_state_bit1_array = np.array(state_bit1_pattern)
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
fsm_generator.run()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
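# Structure note (inferred from the parsing loops, illustrative only):
# waveform_dict['signal'] is assumed to look roughly like
#     [['analysis',
#       {'name': 'test',       'wave': '...'},
#       {'name': 'state_bit0', 'wave': '...'},
#       {'name': 'state_bit1', 'wave': '...'}],
#      ...]
# where each group is tagged by its first element and the remaining entries
# are wavelane dicts carrying a 'name' and a 'wave' string.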
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_step():
"""Test for the Finite State Machine Generator class.
This test is similar to the above test, but in this test,
we will test the `step()` method, and ask users to change the input
logic values in the middle of the test.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("")
fsm_spec_4_state, output_pattern_up, \
state_bit0_pattern_up, \
state_bit1_pattern_up = build_fsm_spec_4_state(0)
_, output_pattern_down, \
state_bit0_pattern_down, \
state_bit1_pattern_down = build_fsm_spec_4_state(1)
output_pattern_down.append(output_pattern_down.pop(0))
state_bit0_pattern_down.append(state_bit0_pattern_down.pop(0))
state_bit1_pattern_down.append(state_bit1_pattern_down.pop(0))
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern_up +
output_pattern_down[1:])
golden_state_bit0_array = np.array(state_bit0_pattern_up +
state_bit0_pattern_down[1:])
golden_state_bit1_array = np.array(state_bit1_pattern_up +
state_bit1_pattern_down[1:])
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
print("Connect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_up)-1):
fsm_generator.step()
print("Connect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_down)):
fsm_generator.step()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_no_trace():
"""Test for the Finite State Machine Generator class.
This is similar to the first test, but in this test,
we will test the case when no analyzer is specified.
"""
ol.download()
fsm_spec_4_state, _, _, _ = build_fsm_spec_4_state(0)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=False)
fsm_generator.setup(fsm_spec_4_state)
fsm_generator.run()
exception_raised = False
try:
fsm_generator.show_waveform()
except ValueError:
exception_raised = True
assert exception_raised, 'Should raise exception for show_waveform().'
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states1():
"""Test for the Finite State Machine Generator class.
The 4th test will check 1 and (MAX_NUM_STATES + 1) states.
These cases should raise exceptions. For these tests, we use the minimum
number of input and output pins.
"""
ol.download()
fsm_generator = None
    fsm_spec_less_than_min_state, _ = build_fsm_spec_random(
        FSM_MIN_NUM_STATES - 1)
    fsm_spec_more_than_max_state, _ = build_fsm_spec_random(
        FSM_MAX_NUM_STATES + 1)
    for fsm_spec in [fsm_spec_less_than_min_state,
                     fsm_spec_more_than_max_state]:
        num_states = len(fsm_spec['states'])
        # Reset per iteration so each invalid spec is verified independently.
        exception_raised = False
        try:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec)
except ValueError:
exception_raised = True
assert exception_raised, \
'Should raise exception when ' \
'there are {} states in the FSM.'.format(num_states)
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states2():
"""Test for the Finite State Machine Generator class.
This test will check 2 and MAX_NUM_STATES states.
These cases should be able to pass random tests.
For these tests, we use the minimum number of input and output pins.
"""
ol.download()
input_pin = list(pin_dict.keys())[0]
print("\nConnect {} to GND, and disconnect other pins.".format(input_pin))
input("Hit enter after done ...")
for num_states in [2, FSM_MAX_NUM_STATES]:
fsm_spec, test_pattern = build_fsm_spec_random(num_states)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec, frequency_mhz=100)
fsm_generator.run()
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
period = num_states
test_tile = np.array(test_pattern)
golden_test_array = np.tile(test_tile,
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_array,
golden_test_array[:MAX_NUM_TRACE_SAMPLES]), \
'Analysis not matching the generated pattern.'
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_max_in_out():
"""Test for the Finite State Machine Generator class.
    This test will test the case when the maximum number of inputs and
outputs are used. At the same time, the largest available number of
states will be implemented.
"""
ol.download()
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
print("\nConnect {} to GND.".format(input_pins))
print("Disconnect all other pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_max_in_out()
period = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
num_output_pins = interface_width - FSM_MAX_INPUT_BITS
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = [[] for _ in range(num_output_pins)]
for i in range(num_output_pins):
golden_arrays[i] = np.tile(test_patterns[i],
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_arrays[i],
golden_arrays[i][:MAX_NUM_TRACE_SAMPLES]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_free_run():
"""Test for the Finite State Machine Generator class.
This will examine a special scenario where no inputs are given.
    In this case, the FSM is a free-running state machine. Since the FSM
    specification requires at least 1 input pin to be specified, 1 pin can
    be used as a `don't care` input, while all the other pins are used as
    outputs. The maximum number of states is deployed.
"""
ol.download()
print("\nDisconnect all the pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_free_run()
period = FSM_MAX_NUM_STATES
num_output_pins = interface_width - 1
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=period)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = test_patterns
for i in range(num_output_pins):
assert np.array_equal(test_arrays[i], golden_arrays[i]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
|
test_fsm_num_states1
|
Test for the Finite State Machine Generator class.
The 4th test will check 1 and (MAX_NUM_STATES + 1) states.
These cases should raise exceptions. For these tests, we use the minimum
number of input and output pins.
|
# Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from random import randint
from math import ceil
import numpy as np
import pytest
from pynq import Overlay
from pynq.tests.util import user_answer_yes
from pynq.lib.logictools.waveform import bitstring_to_int
from pynq.lib.logictools.waveform import wave_to_bitstring
from pynq.lib.logictools import FSMGenerator
from pynq.lib.logictools import ARDUINO
from pynq.lib.logictools import PYNQZ1_LOGICTOOLS_SPECIFICATION
from pynq.lib.logictools import MAX_NUM_TRACE_SAMPLES
from pynq.lib.logictools import FSM_MIN_NUM_STATES
from pynq.lib.logictools import FSM_MAX_NUM_STATES
from pynq.lib.logictools import FSM_MAX_INPUT_BITS
from pynq.lib.logictools import FSM_MAX_STATE_INPUT_BITS
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "pynq_support@xilinx.com"
try:
ol = Overlay('logictools.bit', download=False)
flag0 = True
except IOError:
flag0 = False
flag1 = user_answer_yes("\nTest Finite State Machine (FSM) generator?")
if flag1:
mb_info = ARDUINO
flag = flag0 and flag1
pin_dict = PYNQZ1_LOGICTOOLS_SPECIFICATION['traceable_outputs']
interface_width = PYNQZ1_LOGICTOOLS_SPECIFICATION['interface_width']
def build_fsm_spec_4_state(direction_logic_value):
"""Build an FSM spec with 4 states.
The FSM built has 2 inputs, 1 output, and 4 states. It acts like a
    2-bit counter, where the output goes high only when the FSM is in the
final state.
When the direction pin is low, the counter counts up; if it is high, the
counter counts down.
Parameters
----------
direction_logic_value : int
The logic value of the direction pin.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output pattern corresponding to the direction value.
list
The state bit0 pattern corresponding to the direction value.
list
The state bit1 pattern corresponding to the direction value.
"""
out, rst, direction = list(pin_dict.keys())[0:3]
fsm_spec_4_state = {'inputs': [('rst', rst), ('direction', direction)],
'outputs': [('test', out)],
'states': ['S0', 'S1', 'S2', 'S3'],
'transitions': [['00', 'S0', 'S1', '0'],
['01', 'S0', 'S3', '0'],
['00', 'S1', 'S2', '0'],
['01', 'S1', 'S0', '0'],
['00', 'S2', 'S3', '0'],
['01', 'S2', 'S1', '0'],
['00', 'S3', 'S0', '1'],
['01', 'S3', 'S2', '1'],
['1-', '*', 'S0', '']]}
if not direction_logic_value:
output_pattern = [0, 0, 0, 1]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 0, 1, 1]
else:
output_pattern = [0, 1, 0, 0]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 1, 1, 0]
return fsm_spec_4_state, \
output_pattern, state_bit0_pattern, state_bit1_pattern
def build_fsm_spec_random(num_states):
"""Build an FSM spec with the specified number of states.
    The FSM spec uses only a single input and a single output. As a side
    product, a list of output patterns is also returned.
Parameters
----------
num_states : int
The number of states of the FSM.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin, output_pin = list(pin_dict.keys())[0:2]
if num_states == 1:
return {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': ['S0'],
'transitions': [['1', '*', 'S0', '']]}, None
else:
fsm_spec_state = {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': [],
'transitions': [['1', '*', 'S0', '']]}
output_pattern_list = list()
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i+1) % num_states)
fsm_spec_state['states'] += [current_state]
output_pattern = '{}'.format(randint(0, 1))
transition = ['0', current_state, next_state, output_pattern]
fsm_spec_state['transitions'] += [transition]
output_pattern_list.append(int(output_pattern))
return fsm_spec_state, output_pattern_list
def build_fsm_spec_max_in_out():
"""Build an FSM spec using a maximum number of inputs and outputs.
The returned FSM spec has a maximum number of inputs and
outputs. At the same time, the largest available number of
states will be implemented. For example, on PYNQ-Z1, if
FSM_MAX_INPUT_BITS = 8, and FSM_MAX_STATE_INPUT_BITS = 13, we will
implement 2**(13-8)-1 = 31 states. This is the largest number of states
available for this setup, since there is always 1 dummy state that has
to be reserved.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
output_pins = list(pin_dict.keys())[FSM_MAX_INPUT_BITS:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': [['1' * len(input_pins), '*', 'S0', '']]}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
for i in range(len(input_pins)):
fsm_spec_inout['inputs'].append(('input{}'.format(i),
input_pins[i]))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['0' * len(input_pins), current_state, next_state,
output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
def build_fsm_spec_free_run():
"""Build a spec that results in a free-running FSM.
This will return an FSM spec with no given inputs.
    In this case, the FSM is a free-running state machine.
    The maximum number of states is deployed.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin = list(pin_dict.keys())[0]
output_pins = list(pin_dict.keys())[1:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': []}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = FSM_MAX_NUM_STATES
fsm_spec_inout['inputs'].append(('input0', input_pin))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['-', current_state, next_state, output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_samples():
"""Test for the Finite State Machine Generator class.
In this test, the pattern generated by the FSM will be compared with the
    one specified. We will test a minimum number of samples (one FSM period),
and a maximum number of samples. 10MHz and 100MHz clocks are tested
for each case.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, _, _ = build_fsm_spec_4_state(1)
fsm_period = len(fsm_spec_4_state['states'])
for num_samples in [fsm_period, MAX_NUM_TRACE_SAMPLES]:
test_tile = np.array(output_pattern)
golden_test_array = np.tile(test_tile, ceil(num_samples / 4))
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
assert fsm_generator.status == 'RESET'
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=num_samples)
fsm_generator.setup(fsm_spec_4_state,
frequency_mhz=fsm_frequency_mhz)
assert fsm_generator.status == 'READY'
assert 'bram_data_buf' not in \
fsm_generator.logictools_controller.buffers, \
'bram_data_buf is not freed after use.'
fsm_generator.run()
assert fsm_generator.status == 'RUNNING'
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
assert np.array_equal(test_array,
golden_test_array[:num_samples]), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
assert fsm_generator.status == 'READY'
fsm_generator.reset()
assert fsm_generator.status == 'RESET'
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_state_bits():
"""Test for the Finite State Machine Generator class.
This test is similar to the first test, but in this test,
we will test the case when the state bits are also used as outputs.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, \
state_bit0_pattern, state_bit1_pattern = build_fsm_spec_4_state(0)
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern)
golden_state_bit0_array = np.array(state_bit0_pattern)
golden_state_bit1_array = np.array(state_bit1_pattern)
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
fsm_generator.run()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_step():
"""Test for the Finite State Machine Generator class.
This test is similar to the above test, but in this test,
we will test the `step()` method, and ask users to change the input
logic values in the middle of the test.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("")
fsm_spec_4_state, output_pattern_up, \
state_bit0_pattern_up, \
state_bit1_pattern_up = build_fsm_spec_4_state(0)
_, output_pattern_down, \
state_bit0_pattern_down, \
state_bit1_pattern_down = build_fsm_spec_4_state(1)
output_pattern_down.append(output_pattern_down.pop(0))
state_bit0_pattern_down.append(state_bit0_pattern_down.pop(0))
state_bit1_pattern_down.append(state_bit1_pattern_down.pop(0))
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern_up +
output_pattern_down[1:])
golden_state_bit0_array = np.array(state_bit0_pattern_up +
state_bit0_pattern_down[1:])
golden_state_bit1_array = np.array(state_bit1_pattern_up +
state_bit1_pattern_down[1:])
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
print("Connect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_up)-1):
fsm_generator.step()
print("Connect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_down)):
fsm_generator.step()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_no_trace():
"""Test for the Finite State Machine Generator class.
This is similar to the first test, but in this test,
we will test the case when no analyzer is specified.
"""
ol.download()
fsm_spec_4_state, _, _, _ = build_fsm_spec_4_state(0)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=False)
fsm_generator.setup(fsm_spec_4_state)
fsm_generator.run()
exception_raised = False
try:
fsm_generator.show_waveform()
except ValueError:
exception_raised = True
assert exception_raised, 'Should raise exception for show_waveform().'
fsm_generator.reset()
del fsm_generator
# MASKED: test_fsm_num_states1 function (lines 493-525)
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states2():
"""Test for the Finite State Machine Generator class.
This test will check 2 and MAX_NUM_STATES states.
These cases should be able to pass random tests.
For these tests, we use the minimum number of input and output pins.
"""
ol.download()
input_pin = list(pin_dict.keys())[0]
print("\nConnect {} to GND, and disconnect other pins.".format(input_pin))
input("Hit enter after done ...")
for num_states in [2, FSM_MAX_NUM_STATES]:
fsm_spec, test_pattern = build_fsm_spec_random(num_states)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec, frequency_mhz=100)
fsm_generator.run()
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
period = num_states
test_tile = np.array(test_pattern)
golden_test_array = np.tile(test_tile,
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_array,
golden_test_array[:MAX_NUM_TRACE_SAMPLES]), \
'Analysis not matching the generated pattern.'
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_max_in_out():
"""Test for the Finite State Machine Generator class.
    This test will test the case when the maximum number of inputs and
outputs are used. At the same time, the largest available number of
states will be implemented.
"""
ol.download()
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
print("\nConnect {} to GND.".format(input_pins))
print("Disconnect all other pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_max_in_out()
period = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
num_output_pins = interface_width - FSM_MAX_INPUT_BITS
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = [[] for _ in range(num_output_pins)]
for i in range(num_output_pins):
golden_arrays[i] = np.tile(test_patterns[i],
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_arrays[i],
golden_arrays[i][:MAX_NUM_TRACE_SAMPLES]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_free_run():
"""Test for the Finite State Machine Generator class.
This will examine a special scenario where no inputs are given.
    In this case, the FSM is a free-running state machine. Since the FSM
    specification requires at least 1 input pin to be specified, 1 pin can
    be used as a `don't care` input, while all the other pins are used as
    outputs. The maximum number of states is deployed.
"""
ol.download()
print("\nDisconnect all the pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_free_run()
period = FSM_MAX_NUM_STATES
num_output_pins = interface_width - 1
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=period)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = test_patterns
for i in range(num_output_pins):
assert np.array_equal(test_arrays[i], golden_arrays[i]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
|
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states1():
"""Test for the Finite State Machine Generator class.
The 4th test will check 1 and (MAX_NUM_STATES + 1) states.
These cases should raise exceptions. For these tests, we use the minimum
number of input and output pins.
"""
ol.download()
fsm_generator = None
    fsm_spec_less_than_min_state, _ = build_fsm_spec_random(
        FSM_MIN_NUM_STATES - 1)
    fsm_spec_more_than_max_state, _ = build_fsm_spec_random(
        FSM_MAX_NUM_STATES + 1)
    for fsm_spec in [fsm_spec_less_than_min_state,
                     fsm_spec_more_than_max_state]:
        num_states = len(fsm_spec['states'])
        # Reset per iteration so each invalid spec is verified independently.
        exception_raised = False
        try:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec)
except ValueError:
exception_raised = True
assert exception_raised, \
'Should raise exception when ' \
'there are {} states in the FSM.'.format(num_states)
fsm_generator.reset()
del fsm_generator
| 493
| 525
|
# Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from random import randint
from math import ceil
import numpy as np
import pytest
from pynq import Overlay
from pynq.tests.util import user_answer_yes
from pynq.lib.logictools.waveform import bitstring_to_int
from pynq.lib.logictools.waveform import wave_to_bitstring
from pynq.lib.logictools import FSMGenerator
from pynq.lib.logictools import ARDUINO
from pynq.lib.logictools import PYNQZ1_LOGICTOOLS_SPECIFICATION
from pynq.lib.logictools import MAX_NUM_TRACE_SAMPLES
from pynq.lib.logictools import FSM_MIN_NUM_STATES
from pynq.lib.logictools import FSM_MAX_NUM_STATES
from pynq.lib.logictools import FSM_MAX_INPUT_BITS
from pynq.lib.logictools import FSM_MAX_STATE_INPUT_BITS
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "pynq_support@xilinx.com"
try:
ol = Overlay('logictools.bit', download=False)
flag0 = True
except IOError:
flag0 = False
flag1 = user_answer_yes("\nTest Finite State Machine (FSM) generator?")
if flag1:
mb_info = ARDUINO
flag = flag0 and flag1
pin_dict = PYNQZ1_LOGICTOOLS_SPECIFICATION['traceable_outputs']
interface_width = PYNQZ1_LOGICTOOLS_SPECIFICATION['interface_width']
def build_fsm_spec_4_state(direction_logic_value):
"""Build an FSM spec with 4 states.
The FSM built has 2 inputs, 1 output, and 4 states. It acts like a
    2-bit counter, where the output goes high only when the FSM is in the
final state.
When the direction pin is low, the counter counts up; if it is high, the
counter counts down.
Parameters
----------
direction_logic_value : int
The logic value of the direction pin.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output pattern corresponding to the direction value.
list
The state bit0 pattern corresponding to the direction value.
list
The state bit1 pattern corresponding to the direction value.
"""
out, rst, direction = list(pin_dict.keys())[0:3]
fsm_spec_4_state = {'inputs': [('rst', rst), ('direction', direction)],
'outputs': [('test', out)],
'states': ['S0', 'S1', 'S2', 'S3'],
'transitions': [['00', 'S0', 'S1', '0'],
['01', 'S0', 'S3', '0'],
['00', 'S1', 'S2', '0'],
['01', 'S1', 'S0', '0'],
['00', 'S2', 'S3', '0'],
['01', 'S2', 'S1', '0'],
['00', 'S3', 'S0', '1'],
['01', 'S3', 'S2', '1'],
['1-', '*', 'S0', '']]}
if not direction_logic_value:
output_pattern = [0, 0, 0, 1]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 0, 1, 1]
else:
output_pattern = [0, 1, 0, 0]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 1, 1, 0]
return fsm_spec_4_state, \
output_pattern, state_bit0_pattern, state_bit1_pattern
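# --- Editor's sketch (not part of the original test file): a pure-Python
# replay of the transition table above, showing how the golden patterns are
# derived. A row matches when every input bit equals the pattern bit or the
# pattern bit is '-', and when its source state is the current state or '*';
# the output column is the bit traced while in the current state.
def _replay_4_state(spec, input_bits, num_steps):
    state = spec['states'][0]
    outputs = []
    for _ in range(num_steps):
        for pattern, src, dst, out in spec['transitions']:
            if (all(p in (b, '-') for p, b in zip(pattern, input_bits))
                    and src in (state, '*')):
                outputs.append(int(out) if out else 0)
                state = dst
                break
    return outputs
# With rst=0, direction=0: _replay_4_state(spec, '00', 4) == [0, 0, 0, 1]
# With rst=0, direction=1: _replay_4_state(spec, '01', 4) == [0, 1, 0, 0]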
def build_fsm_spec_random(num_states):
"""Build an FSM spec with the specified number of states.
    The FSM spec uses only a single input and a single output. As a side
    product, a list of output patterns is also returned.
Parameters
----------
num_states : int
The number of states of the FSM.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin, output_pin = list(pin_dict.keys())[0:2]
if num_states == 1:
return {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': ['S0'],
'transitions': [['1', '*', 'S0', '']]}, None
else:
fsm_spec_state = {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': [],
'transitions': [['1', '*', 'S0', '']]}
output_pattern_list = list()
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i+1) % num_states)
fsm_spec_state['states'] += [current_state]
output_pattern = '{}'.format(randint(0, 1))
transition = ['0', current_state, next_state, output_pattern]
fsm_spec_state['transitions'] += [transition]
output_pattern_list.append(int(output_pattern))
return fsm_spec_state, output_pattern_list
def build_fsm_spec_max_in_out():
"""Build an FSM spec using a maximum number of inputs and outputs.
The returned FSM spec has a maximum number of inputs and
outputs. At the same time, the largest available number of
states will be implemented. For example, on PYNQ-Z1, if
FSM_MAX_INPUT_BITS = 8, and FSM_MAX_STATE_INPUT_BITS = 13, we will
implement 2**(13-8)-1 = 31 states. This is the largest number of states
available for this setup, since there is always 1 dummy state that has
to be reserved.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
output_pins = list(pin_dict.keys())[FSM_MAX_INPUT_BITS:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': [['1' * len(input_pins), '*', 'S0', '']]}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
for i in range(len(input_pins)):
fsm_spec_inout['inputs'].append(('input{}'.format(i),
input_pins[i]))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['0' * len(input_pins), current_state, next_state,
output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
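# --- Editor's note (sketch): worked check of the state-count arithmetic in
# the docstring above, using the PYNQ-Z1 example values; the function itself
# uses the constants imported from pynq.lib.logictools.
_state_bits = 13 - 8  # FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS
assert 2 ** _state_bits - 1 == 31  # one of the 32 encodings is the dummy state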
def build_fsm_spec_free_run():
"""Build a spec that results in a free-running FSM.
This will return an FSM spec with no given inputs.
In this case, the FSM is a free running state machine.
A maximum number of states are deployed.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin = list(pin_dict.keys())[0]
output_pins = list(pin_dict.keys())[1:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': []}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = FSM_MAX_NUM_STATES
fsm_spec_inout['inputs'].append(('input0', input_pin))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['-', current_state, next_state, output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_samples():
"""Test for the Finite State Machine Generator class.
In this test, the pattern generated by the FSM will be compared with the
    one specified. We will test a minimum number of samples (one FSM period),
and a maximum number of samples. 10MHz and 100MHz clocks are tested
for each case.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, _, _ = build_fsm_spec_4_state(1)
fsm_period = len(fsm_spec_4_state['states'])
for num_samples in [fsm_period, MAX_NUM_TRACE_SAMPLES]:
test_tile = np.array(output_pattern)
golden_test_array = np.tile(test_tile, ceil(num_samples / 4))
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
assert fsm_generator.status == 'RESET'
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=num_samples)
fsm_generator.setup(fsm_spec_4_state,
frequency_mhz=fsm_frequency_mhz)
assert fsm_generator.status == 'READY'
assert 'bram_data_buf' not in \
fsm_generator.logictools_controller.buffers, \
'bram_data_buf is not freed after use.'
fsm_generator.run()
assert fsm_generator.status == 'RUNNING'
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
assert np.array_equal(test_array,
golden_test_array[:num_samples]), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
assert fsm_generator.status == 'READY'
fsm_generator.reset()
assert fsm_generator.status == 'RESET'
del fsm_generator
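# --- Editor's sketch of the golden-array construction used above: tile the
# 4-sample FSM period enough times to cover num_samples, then truncate, so
# the comparison also works when num_samples is not a multiple of the period.
# Standalone example with a hypothetical trace length of 10:
_pattern = np.array([0, 1, 0, 0])  # output pattern for direction = 1
_golden = np.tile(_pattern, ceil(10 / 4))[:10]
assert list(_golden) == [0, 1, 0, 0, 0, 1, 0, 0, 0, 1]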
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_state_bits():
"""Test for the Finite State Machine Generator class.
This test is similar to the first test, but in this test,
we will test the case when the state bits are also used as outputs.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, \
state_bit0_pattern, state_bit1_pattern = build_fsm_spec_4_state(0)
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern)
golden_state_bit0_array = np.array(state_bit0_pattern)
golden_state_bit1_array = np.array(state_bit1_pattern)
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
fsm_generator.run()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
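# --- Editor's note (sketch): the golden state-bit patterns above are
# consistent with states being encoded in declaration order, S0=00, S1=01,
# S2=10, S3=11 (an assumption about the generator's encoding). Counting up
# visits 00, 01, 10, 11:
_visited_up = [0b00, 0b01, 0b10, 0b11]
assert [s & 1 for s in _visited_up] == [0, 1, 0, 1]  # state_bit0
assert [(s >> 1) & 1 for s in _visited_up] == [0, 0, 1, 1]  # state_bit1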
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_step():
"""Test for the Finite State Machine Generator class.
This test is similar to the above test, but in this test,
we will test the `step()` method, and ask users to change the input
logic values in the middle of the test.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("")
fsm_spec_4_state, output_pattern_up, \
state_bit0_pattern_up, \
state_bit1_pattern_up = build_fsm_spec_4_state(0)
_, output_pattern_down, \
state_bit0_pattern_down, \
state_bit1_pattern_down = build_fsm_spec_4_state(1)
output_pattern_down.append(output_pattern_down.pop(0))
state_bit0_pattern_down.append(state_bit0_pattern_down.pop(0))
state_bit1_pattern_down.append(state_bit1_pattern_down.pop(0))
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern_up +
output_pattern_down[1:])
golden_state_bit0_array = np.array(state_bit0_pattern_up +
state_bit0_pattern_down[1:])
golden_state_bit1_array = np.array(state_bit1_pattern_up +
state_bit1_pattern_down[1:])
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
print("Connect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_up)-1):
fsm_generator.step()
print("Connect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_down)):
fsm_generator.step()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_no_trace():
"""Test for the Finite State Machine Generator class.
This is similar to the first test, but in this test,
we will test the case when no analyzer is specified.
"""
ol.download()
fsm_spec_4_state, _, _, _ = build_fsm_spec_4_state(0)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=False)
fsm_generator.setup(fsm_spec_4_state)
fsm_generator.run()
exception_raised = False
try:
fsm_generator.show_waveform()
except ValueError:
exception_raised = True
assert exception_raised, 'Should raise exception for show_waveform().'
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states1():
"""Test for the Finite State Machine Generator class.
    This test will check 1 and (FSM_MAX_NUM_STATES + 1) states.
These cases should raise exceptions. For these tests, we use the minimum
number of input and output pins.
"""
ol.download()
fsm_generator = None
exception_raised = False
fsm_spec_less_than_min_state, _ = build_fsm_spec_random(
FSM_MIN_NUM_STATES - 1)
fsm_spec_more_than_max_state, _ = build_fsm_spec_random(
FSM_MAX_NUM_STATES + 1)
for fsm_spec in [fsm_spec_less_than_min_state,
fsm_spec_more_than_max_state]:
num_states = len(fsm_spec['states'])
try:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec)
except ValueError:
exception_raised = True
assert exception_raised, \
'Should raise exception when ' \
'there are {} states in the FSM.'.format(num_states)
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states2():
"""Test for the Finite State Machine Generator class.
    This test will check 2 and FSM_MAX_NUM_STATES states.
    These cases should pass the randomized pattern tests.
For these tests, we use the minimum number of input and output pins.
"""
ol.download()
input_pin = list(pin_dict.keys())[0]
print("\nConnect {} to GND, and disconnect other pins.".format(input_pin))
input("Hit enter after done ...")
for num_states in [2, FSM_MAX_NUM_STATES]:
fsm_spec, test_pattern = build_fsm_spec_random(num_states)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec, frequency_mhz=100)
fsm_generator.run()
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
period = num_states
test_tile = np.array(test_pattern)
golden_test_array = np.tile(test_tile,
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_array,
golden_test_array[:MAX_NUM_TRACE_SAMPLES]), \
'Analysis not matching the generated pattern.'
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_max_in_out():
"""Test for the Finite State Machine Generator class.
    This test covers the case when the maximum number of inputs and
    outputs is used. At the same time, the largest available number of
states will be implemented.
"""
ol.download()
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
print("\nConnect {} to GND.".format(input_pins))
print("Disconnect all other pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_max_in_out()
period = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
num_output_pins = interface_width - FSM_MAX_INPUT_BITS
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = [[] for _ in range(num_output_pins)]
for i in range(num_output_pins):
golden_arrays[i] = np.tile(test_patterns[i],
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_arrays[i],
golden_arrays[i][:MAX_NUM_TRACE_SAMPLES]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_free_run():
"""Test for the Finite State Machine Generator class.
This will examine a special scenario where no inputs are given.
In this case, the FSM is a free running state machine. Since the FSM
specification requires at least 1 input pin to be specified, 1 pin can
be used as `don't care` input, while all the other pins are used as
outputs. A maximum number of states are deployed.
"""
ol.download()
print("\nDisconnect all the pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_free_run()
period = FSM_MAX_NUM_STATES
num_output_pins = interface_width - 1
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=period)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = test_patterns
for i in range(num_output_pins):
assert np.array_equal(test_arrays[i], golden_arrays[i]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
|
test_fsm_num_states2
|
Test for the Finite State Machine Generator class.
This test will check 2 and FSM_MAX_NUM_STATES states.
These cases should pass the randomized pattern tests.
For these tests, we use the minimum number of input and output pins.
|
# Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from random import randint
from math import ceil
import numpy as np
import pytest
from pynq import Overlay
from pynq.tests.util import user_answer_yes
from pynq.lib.logictools.waveform import bitstring_to_int
from pynq.lib.logictools.waveform import wave_to_bitstring
from pynq.lib.logictools import FSMGenerator
from pynq.lib.logictools import ARDUINO
from pynq.lib.logictools import PYNQZ1_LOGICTOOLS_SPECIFICATION
from pynq.lib.logictools import MAX_NUM_TRACE_SAMPLES
from pynq.lib.logictools import FSM_MIN_NUM_STATES
from pynq.lib.logictools import FSM_MAX_NUM_STATES
from pynq.lib.logictools import FSM_MAX_INPUT_BITS
from pynq.lib.logictools import FSM_MAX_STATE_INPUT_BITS
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "pynq_support@xilinx.com"
try:
ol = Overlay('logictools.bit', download=False)
flag0 = True
except IOError:
flag0 = False
flag1 = user_answer_yes("\nTest Finite State Machine (FSM) generator?")
if flag1:
mb_info = ARDUINO
flag = flag0 and flag1
pin_dict = PYNQZ1_LOGICTOOLS_SPECIFICATION['traceable_outputs']
interface_width = PYNQZ1_LOGICTOOLS_SPECIFICATION['interface_width']
def build_fsm_spec_4_state(direction_logic_value):
"""Build an FSM spec with 4 states.
The FSM built has 2 inputs, 1 output, and 4 states. It acts like a
    2-bit counter, where the output goes high only when the FSM is in the
final state.
When the direction pin is low, the counter counts up; if it is high, the
counter counts down.
Parameters
----------
direction_logic_value : int
The logic value of the direction pin.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output pattern corresponding to the direction value.
list
The state bit0 pattern corresponding to the direction value.
list
The state bit1 pattern corresponding to the direction value.
"""
out, rst, direction = list(pin_dict.keys())[0:3]
fsm_spec_4_state = {'inputs': [('rst', rst), ('direction', direction)],
'outputs': [('test', out)],
'states': ['S0', 'S1', 'S2', 'S3'],
'transitions': [['00', 'S0', 'S1', '0'],
['01', 'S0', 'S3', '0'],
['00', 'S1', 'S2', '0'],
['01', 'S1', 'S0', '0'],
['00', 'S2', 'S3', '0'],
['01', 'S2', 'S1', '0'],
['00', 'S3', 'S0', '1'],
['01', 'S3', 'S2', '1'],
['1-', '*', 'S0', '']]}
if not direction_logic_value:
output_pattern = [0, 0, 0, 1]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 0, 1, 1]
else:
output_pattern = [0, 1, 0, 0]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 1, 1, 0]
return fsm_spec_4_state, \
output_pattern, state_bit0_pattern, state_bit1_pattern
def build_fsm_spec_random(num_states):
"""Build an FSM spec with the specified number of states.
    The FSM spec uses only a single input and a single output. As a side
    product, a list of output patterns is also returned.
Parameters
----------
num_states : int
The number of states of the FSM.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin, output_pin = list(pin_dict.keys())[0:2]
if num_states == 1:
return {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': ['S0'],
'transitions': [['1', '*', 'S0', '']]}, None
else:
fsm_spec_state = {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': [],
'transitions': [['1', '*', 'S0', '']]}
output_pattern_list = list()
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i+1) % num_states)
fsm_spec_state['states'] += [current_state]
output_pattern = '{}'.format(randint(0, 1))
transition = ['0', current_state, next_state, output_pattern]
fsm_spec_state['transitions'] += [transition]
output_pattern_list.append(int(output_pattern))
return fsm_spec_state, output_pattern_list
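# --- Editor's sketch (not part of the original file; assumes the module-level
# pin_dict is available): for num_states >= 2 the random spec is a ring
# counter built in state order, so the output column of its ring transitions
# must reproduce the returned output pattern list.
def _check_random_spec(num_states):
    spec, pattern = build_fsm_spec_random(num_states)
    ring_outputs = [int(row[3]) for row in spec['transitions'][1:]]
    assert ring_outputs == pattern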
def build_fsm_spec_max_in_out():
"""Build an FSM spec using a maximum number of inputs and outputs.
The returned FSM spec has a maximum number of inputs and
outputs. At the same time, the largest available number of
states will be implemented. For example, on PYNQ-Z1, if
FSM_MAX_INPUT_BITS = 8, and FSM_MAX_STATE_INPUT_BITS = 13, we will
implement 2**(13-8)-1 = 31 states. This is the largest number of states
available for this setup, since there is always 1 dummy state that has
to be reserved.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
output_pins = list(pin_dict.keys())[FSM_MAX_INPUT_BITS:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': [['1' * len(input_pins), '*', 'S0', '']]}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
for i in range(len(input_pins)):
fsm_spec_inout['inputs'].append(('input{}'.format(i),
input_pins[i]))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['0' * len(input_pins), current_state, next_state,
output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
def build_fsm_spec_free_run():
"""Build a spec that results in a free-running FSM.
This will return an FSM spec with no given inputs.
In this case, the FSM is a free running state machine.
A maximum number of states are deployed.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin = list(pin_dict.keys())[0]
output_pins = list(pin_dict.keys())[1:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': []}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = FSM_MAX_NUM_STATES
fsm_spec_inout['inputs'].append(('input0', input_pin))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['-', current_state, next_state, output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
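# --- Editor's note (sketch): the '-' input column above is a don't-care, so
# every ring transition matches regardless of the single input pin's value
# and the FSM advances on every clock, i.e. it free-runs.
def _dont_care_matches(pattern, input_bits):
    return all(p in (b, '-') for p, b in zip(pattern, input_bits))
assert _dont_care_matches('-', '0') and _dont_care_matches('-', '1')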
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_samples():
"""Test for the Finite State Machine Generator class.
In this test, the pattern generated by the FSM will be compared with the
    one specified. We will test a minimum number of samples (one FSM period),
and a maximum number of samples. 10MHz and 100MHz clocks are tested
for each case.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, _, _ = build_fsm_spec_4_state(1)
fsm_period = len(fsm_spec_4_state['states'])
for num_samples in [fsm_period, MAX_NUM_TRACE_SAMPLES]:
test_tile = np.array(output_pattern)
golden_test_array = np.tile(test_tile, ceil(num_samples / 4))
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
assert fsm_generator.status == 'RESET'
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=num_samples)
fsm_generator.setup(fsm_spec_4_state,
frequency_mhz=fsm_frequency_mhz)
assert fsm_generator.status == 'READY'
assert 'bram_data_buf' not in \
fsm_generator.logictools_controller.buffers, \
'bram_data_buf is not freed after use.'
fsm_generator.run()
assert fsm_generator.status == 'RUNNING'
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
assert np.array_equal(test_array,
golden_test_array[:num_samples]), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
assert fsm_generator.status == 'READY'
fsm_generator.reset()
assert fsm_generator.status == 'RESET'
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_state_bits():
"""Test for the Finite State Machine Generator class.
This test is similar to the first test, but in this test,
we will test the case when the state bits are also used as outputs.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, \
state_bit0_pattern, state_bit1_pattern = build_fsm_spec_4_state(0)
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern)
golden_state_bit0_array = np.array(state_bit0_pattern)
golden_state_bit1_array = np.array(state_bit1_pattern)
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
fsm_generator.run()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_step():
"""Test for the Finite State Machine Generator class.
This test is similar to the above test, but in this test,
we will test the `step()` method, and ask users to change the input
logic values in the middle of the test.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("")
fsm_spec_4_state, output_pattern_up, \
state_bit0_pattern_up, \
state_bit1_pattern_up = build_fsm_spec_4_state(0)
_, output_pattern_down, \
state_bit0_pattern_down, \
state_bit1_pattern_down = build_fsm_spec_4_state(1)
output_pattern_down.append(output_pattern_down.pop(0))
state_bit0_pattern_down.append(state_bit0_pattern_down.pop(0))
state_bit1_pattern_down.append(state_bit1_pattern_down.pop(0))
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern_up +
output_pattern_down[1:])
golden_state_bit0_array = np.array(state_bit0_pattern_up +
state_bit0_pattern_down[1:])
golden_state_bit1_array = np.array(state_bit1_pattern_up +
state_bit1_pattern_down[1:])
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
print("Connect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_up)-1):
fsm_generator.step()
print("Connect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_down)):
fsm_generator.step()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
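# --- Editor's sketch (standalone, with the concrete 4-state values) of the
# golden-array bookkeeping in test_fsm_step above: the down-count pattern is
# rotated left by one and its first element dropped before being appended to
# the up-count pattern, matching the 3 up steps plus 4 down steps taken.
_up = [0, 0, 0, 1]  # count-up output pattern
_down = [0, 1, 0, 0]  # count-down output pattern
_down.append(_down.pop(0))  # rotate left -> [1, 0, 0, 0]
assert _up + _down[1:] == [0, 0, 0, 1, 0, 0, 0]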
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_no_trace():
"""Test for the Finite State Machine Generator class.
This is similar to the first test, but in this test,
we will test the case when no analyzer is specified.
"""
ol.download()
fsm_spec_4_state, _, _, _ = build_fsm_spec_4_state(0)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=False)
fsm_generator.setup(fsm_spec_4_state)
fsm_generator.run()
exception_raised = False
try:
fsm_generator.show_waveform()
except ValueError:
exception_raised = True
assert exception_raised, 'Should raise exception for show_waveform().'
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states1():
"""Test for the Finite State Machine Generator class.
    This test will check 1 and (FSM_MAX_NUM_STATES + 1) states.
These cases should raise exceptions. For these tests, we use the minimum
number of input and output pins.
"""
ol.download()
fsm_generator = None
exception_raised = False
fsm_spec_less_than_min_state, _ = build_fsm_spec_random(
FSM_MIN_NUM_STATES - 1)
fsm_spec_more_than_max_state, _ = build_fsm_spec_random(
FSM_MAX_NUM_STATES + 1)
for fsm_spec in [fsm_spec_less_than_min_state,
fsm_spec_more_than_max_state]:
num_states = len(fsm_spec['states'])
try:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec)
except ValueError:
exception_raised = True
assert exception_raised, \
'Should raise exception when ' \
'there are {} states in the FSM.'.format(num_states)
fsm_generator.reset()
del fsm_generator
# MASKED: test_fsm_num_states2 function (lines 528-572)
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_max_in_out():
"""Test for the Finite State Machine Generator class.
    This test covers the case when the maximum number of inputs and
    outputs is used. At the same time, the largest available number of
states will be implemented.
"""
ol.download()
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
print("\nConnect {} to GND.".format(input_pins))
print("Disconnect all other pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_max_in_out()
period = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
num_output_pins = interface_width - FSM_MAX_INPUT_BITS
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = [[] for _ in range(num_output_pins)]
for i in range(num_output_pins):
golden_arrays[i] = np.tile(test_patterns[i],
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_arrays[i],
golden_arrays[i][:MAX_NUM_TRACE_SAMPLES]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_free_run():
"""Test for the Finite State Machine Generator class.
This will examine a special scenario where no inputs are given.
In this case, the FSM is a free running state machine. Since the FSM
specification requires at least 1 input pin to be specified, 1 pin can
be used as `don't care` input, while all the other pins are used as
outputs. A maximum number of states are deployed.
"""
ol.download()
print("\nDisconnect all the pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_free_run()
period = FSM_MAX_NUM_STATES
num_output_pins = interface_width - 1
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=period)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = test_patterns
for i in range(num_output_pins):
assert np.array_equal(test_arrays[i], golden_arrays[i]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
|
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states2():
"""Test for the Finite State Machine Generator class.
    This test will check 2 and FSM_MAX_NUM_STATES states.
    These cases should pass the randomized pattern tests.
For these tests, we use the minimum number of input and output pins.
"""
ol.download()
input_pin = list(pin_dict.keys())[0]
print("\nConnect {} to GND, and disconnect other pins.".format(input_pin))
input("Hit enter after done ...")
for num_states in [2, FSM_MAX_NUM_STATES]:
fsm_spec, test_pattern = build_fsm_spec_random(num_states)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec, frequency_mhz=100)
fsm_generator.run()
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
period = num_states
test_tile = np.array(test_pattern)
golden_test_array = np.tile(test_tile,
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_array,
golden_test_array[:MAX_NUM_TRACE_SAMPLES]), \
'Analysis not matching the generated pattern.'
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
| 528
| 572
|
# Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from random import randint
from math import ceil
import numpy as np
import pytest
from pynq import Overlay
from pynq.tests.util import user_answer_yes
from pynq.lib.logictools.waveform import bitstring_to_int
from pynq.lib.logictools.waveform import wave_to_bitstring
from pynq.lib.logictools import FSMGenerator
from pynq.lib.logictools import ARDUINO
from pynq.lib.logictools import PYNQZ1_LOGICTOOLS_SPECIFICATION
from pynq.lib.logictools import MAX_NUM_TRACE_SAMPLES
from pynq.lib.logictools import FSM_MIN_NUM_STATES
from pynq.lib.logictools import FSM_MAX_NUM_STATES
from pynq.lib.logictools import FSM_MAX_INPUT_BITS
from pynq.lib.logictools import FSM_MAX_STATE_INPUT_BITS
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "pynq_support@xilinx.com"
try:
ol = Overlay('logictools.bit', download=False)
flag0 = True
except IOError:
flag0 = False
flag1 = user_answer_yes("\nTest Finite State Machine (FSM) generator?")
if flag1:
mb_info = ARDUINO
flag = flag0 and flag1
pin_dict = PYNQZ1_LOGICTOOLS_SPECIFICATION['traceable_outputs']
interface_width = PYNQZ1_LOGICTOOLS_SPECIFICATION['interface_width']
def build_fsm_spec_4_state(direction_logic_value):
"""Build an FSM spec with 4 states.
The FSM built has 2 inputs, 1 output, and 4 states. It acts like a
    2-bit counter, where the output goes high only when the FSM is in the
final state.
When the direction pin is low, the counter counts up; if it is high, the
counter counts down.
Parameters
----------
direction_logic_value : int
The logic value of the direction pin.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output pattern corresponding to the direction value.
list
The state bit0 pattern corresponding to the direction value.
list
The state bit1 pattern corresponding to the direction value.
"""
out, rst, direction = list(pin_dict.keys())[0:3]
fsm_spec_4_state = {'inputs': [('rst', rst), ('direction', direction)],
'outputs': [('test', out)],
'states': ['S0', 'S1', 'S2', 'S3'],
'transitions': [['00', 'S0', 'S1', '0'],
['01', 'S0', 'S3', '0'],
['00', 'S1', 'S2', '0'],
['01', 'S1', 'S0', '0'],
['00', 'S2', 'S3', '0'],
['01', 'S2', 'S1', '0'],
['00', 'S3', 'S0', '1'],
['01', 'S3', 'S2', '1'],
['1-', '*', 'S0', '']]}
if not direction_logic_value:
output_pattern = [0, 0, 0, 1]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 0, 1, 1]
else:
output_pattern = [0, 1, 0, 0]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 1, 1, 0]
return fsm_spec_4_state, \
output_pattern, state_bit0_pattern, state_bit1_pattern
def build_fsm_spec_random(num_states):
"""Build an FSM spec with the specified number of states.
    The FSM spec uses only a single input and a single output. As a side
    product, a list of output patterns is also returned.
Parameters
----------
num_states : int
The number of states of the FSM.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin, output_pin = list(pin_dict.keys())[0:2]
if num_states == 1:
return {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': ['S0'],
'transitions': [['1', '*', 'S0', '']]}, None
else:
fsm_spec_state = {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': [],
'transitions': [['1', '*', 'S0', '']]}
output_pattern_list = list()
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i+1) % num_states)
fsm_spec_state['states'] += [current_state]
output_pattern = '{}'.format(randint(0, 1))
transition = ['0', current_state, next_state, output_pattern]
fsm_spec_state['transitions'] += [transition]
output_pattern_list.append(int(output_pattern))
return fsm_spec_state, output_pattern_list
def build_fsm_spec_max_in_out():
"""Build an FSM spec using a maximum number of inputs and outputs.
The returned FSM spec has a maximum number of inputs and
outputs. At the same time, the largest available number of
states will be implemented. For example, on PYNQ-Z1, if
FSM_MAX_INPUT_BITS = 8, and FSM_MAX_STATE_INPUT_BITS = 13, we will
implement 2**(13-8)-1 = 31 states. This is the largest number of states
available for this setup, since there is always 1 dummy state that has
to be reserved.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
output_pins = list(pin_dict.keys())[FSM_MAX_INPUT_BITS:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': [['1' * len(input_pins), '*', 'S0', '']]}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
for i in range(len(input_pins)):
fsm_spec_inout['inputs'].append(('input{}'.format(i),
input_pins[i]))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['0' * len(input_pins), current_state, next_state,
output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
def build_fsm_spec_free_run():
"""Build a spec that results in a free-running FSM.
This will return an FSM spec with no given inputs.
In this case, the FSM is a free running state machine.
A maximum number of states are deployed.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin = list(pin_dict.keys())[0]
output_pins = list(pin_dict.keys())[1:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': []}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = FSM_MAX_NUM_STATES
fsm_spec_inout['inputs'].append(('input0', input_pin))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['-', current_state, next_state, output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_samples():
"""Test for the Finite State Machine Generator class.
In this test, the pattern generated by the FSM will be compared with the
    one specified. We will test a minimum number of samples (one FSM period),
and a maximum number of samples. 10MHz and 100MHz clocks are tested
for each case.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, _, _ = build_fsm_spec_4_state(1)
fsm_period = len(fsm_spec_4_state['states'])
for num_samples in [fsm_period, MAX_NUM_TRACE_SAMPLES]:
test_tile = np.array(output_pattern)
golden_test_array = np.tile(test_tile, ceil(num_samples / 4))
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
assert fsm_generator.status == 'RESET'
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=num_samples)
fsm_generator.setup(fsm_spec_4_state,
frequency_mhz=fsm_frequency_mhz)
assert fsm_generator.status == 'READY'
assert 'bram_data_buf' not in \
fsm_generator.logictools_controller.buffers, \
'bram_data_buf is not freed after use.'
fsm_generator.run()
assert fsm_generator.status == 'RUNNING'
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
assert np.array_equal(test_array,
golden_test_array[:num_samples]), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
assert fsm_generator.status == 'READY'
fsm_generator.reset()
assert fsm_generator.status == 'RESET'
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_state_bits():
"""Test for the Finite State Machine Generator class.
This test is similar to the first test, but in this test,
we will test the case when the state bits are also used as outputs.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, \
state_bit0_pattern, state_bit1_pattern = build_fsm_spec_4_state(0)
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern)
golden_state_bit0_array = np.array(state_bit0_pattern)
golden_state_bit1_array = np.array(state_bit1_pattern)
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
fsm_generator.run()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_step():
"""Test for the Finite State Machine Generator class.
This test is similar to the above test, but in this test,
we will test the `step()` method, and ask users to change the input
logic values in the middle of the test.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("")
fsm_spec_4_state, output_pattern_up, \
state_bit0_pattern_up, \
state_bit1_pattern_up = build_fsm_spec_4_state(0)
_, output_pattern_down, \
state_bit0_pattern_down, \
state_bit1_pattern_down = build_fsm_spec_4_state(1)
output_pattern_down.append(output_pattern_down.pop(0))
state_bit0_pattern_down.append(state_bit0_pattern_down.pop(0))
state_bit1_pattern_down.append(state_bit1_pattern_down.pop(0))
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern_up +
output_pattern_down[1:])
golden_state_bit0_array = np.array(state_bit0_pattern_up +
state_bit0_pattern_down[1:])
golden_state_bit1_array = np.array(state_bit1_pattern_up +
state_bit1_pattern_down[1:])
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
print("Connect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_up)-1):
fsm_generator.step()
print("Connect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_down)):
fsm_generator.step()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_no_trace():
"""Test for the Finite State Machine Generator class.
This is similar to the first test, but in this test,
we will test the case when no analyzer is specified.
"""
ol.download()
fsm_spec_4_state, _, _, _ = build_fsm_spec_4_state(0)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=False)
fsm_generator.setup(fsm_spec_4_state)
fsm_generator.run()
exception_raised = False
try:
fsm_generator.show_waveform()
except ValueError:
exception_raised = True
assert exception_raised, 'Should raise exception for show_waveform().'
fsm_generator.reset()
del fsm_generator
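# --- Editor's note (sketch): the manual exception_raised flag above can also
# be written with pytest's context manager; a self-contained illustration:
def _no_analyzer():
    raise ValueError('no analyzer attached')  # stand-in for show_waveform()
def _raises_idiom_sketch():
    with pytest.raises(ValueError):
        _no_analyzer()
_raises_idiom_sketch()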
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states1():
"""Test for the Finite State Machine Generator class.
    This test will check 1 and (FSM_MAX_NUM_STATES + 1) states.
These cases should raise exceptions. For these tests, we use the minimum
number of input and output pins.
"""
ol.download()
fsm_generator = None
exception_raised = False
fsm_spec_less_than_min_state, _ = build_fsm_spec_random(
FSM_MIN_NUM_STATES - 1)
fsm_spec_more_than_max_state, _ = build_fsm_spec_random(
FSM_MAX_NUM_STATES + 1)
for fsm_spec in [fsm_spec_less_than_min_state,
fsm_spec_more_than_max_state]:
num_states = len(fsm_spec['states'])
try:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec)
except ValueError:
exception_raised = True
assert exception_raised, \
'Should raise exception when ' \
'there are {} states in the FSM.'.format(num_states)
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states2():
"""Test for the Finite State Machine Generator class.
    This test will check 2 and FSM_MAX_NUM_STATES states.
    These cases should pass the randomized pattern tests.
For these tests, we use the minimum number of input and output pins.
"""
ol.download()
input_pin = list(pin_dict.keys())[0]
print("\nConnect {} to GND, and disconnect other pins.".format(input_pin))
input("Hit enter after done ...")
for num_states in [2, FSM_MAX_NUM_STATES]:
fsm_spec, test_pattern = build_fsm_spec_random(num_states)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec, frequency_mhz=100)
fsm_generator.run()
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
period = num_states
test_tile = np.array(test_pattern)
golden_test_array = np.tile(test_tile,
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_array,
golden_test_array[:MAX_NUM_TRACE_SAMPLES]), \
'Analysis not matching the generated pattern.'
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_max_in_out():
"""Test for the Finite State Machine Generator class.
    This test covers the case when the maximum number of inputs and
outputs are used. At the same time, the largest available number of
states will be implemented.
"""
ol.download()
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
print("\nConnect {} to GND.".format(input_pins))
print("Disconnect all other pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_max_in_out()
period = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
num_output_pins = interface_width - FSM_MAX_INPUT_BITS
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = [[] for _ in range(num_output_pins)]
for i in range(num_output_pins):
golden_arrays[i] = np.tile(test_patterns[i],
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_arrays[i],
golden_arrays[i][:MAX_NUM_TRACE_SAMPLES]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_free_run():
"""Test for the Finite State Machine Generator class.
This will examine a special scenario where no inputs are given.
In this case, the FSM is a free running state machine. Since the FSM
specification requires at least 1 input pin to be specified, 1 pin can
be used as `don't care` input, while all the other pins are used as
    outputs. The maximum number of states is deployed.
"""
ol.download()
print("\nDisconnect all the pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_free_run()
period = FSM_MAX_NUM_STATES
num_output_pins = interface_width - 1
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=period)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = test_patterns
for i in range(num_output_pins):
assert np.array_equal(test_arrays[i], golden_arrays[i]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
|
test_fsm_max_in_out
|
Test for the Finite State Machine Generator class.
This test covers the case when the maximum number of inputs and
outputs are used. At the same time, the largest available number of
states will be implemented.
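For concreteness, the FSM period this test checks against follows directly
from the interface constants. A minimal sketch, using the PYNQ-Z1 values
quoted in build_fsm_spec_max_in_out's docstring (the concrete numbers are
assumptions for illustration, not values read from hardware):

FSM_MAX_INPUT_BITS = 8          # assumed PYNQ-Z1 value
FSM_MAX_STATE_INPUT_BITS = 13   # assumed PYNQ-Z1 value
period = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
print(period)                   # 31; one dummy state is always reserved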
|
# Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from random import randint
from math import ceil
import numpy as np
import pytest
from pynq import Overlay
from pynq.tests.util import user_answer_yes
from pynq.lib.logictools.waveform import bitstring_to_int
from pynq.lib.logictools.waveform import wave_to_bitstring
from pynq.lib.logictools import FSMGenerator
from pynq.lib.logictools import ARDUINO
from pynq.lib.logictools import PYNQZ1_LOGICTOOLS_SPECIFICATION
from pynq.lib.logictools import MAX_NUM_TRACE_SAMPLES
from pynq.lib.logictools import FSM_MIN_NUM_STATES
from pynq.lib.logictools import FSM_MAX_NUM_STATES
from pynq.lib.logictools import FSM_MAX_INPUT_BITS
from pynq.lib.logictools import FSM_MAX_STATE_INPUT_BITS
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "pynq_support@xilinx.com"
try:
ol = Overlay('logictools.bit', download=False)
flag0 = True
except IOError:
flag0 = False
flag1 = user_answer_yes("\nTest Finite State Machine (FSM) generator?")
if flag1:
mb_info = ARDUINO
flag = flag0 and flag1
pin_dict = PYNQZ1_LOGICTOOLS_SPECIFICATION['traceable_outputs']
interface_width = PYNQZ1_LOGICTOOLS_SPECIFICATION['interface_width']
def build_fsm_spec_4_state(direction_logic_value):
"""Build an FSM spec with 4 states.
The FSM built has 2 inputs, 1 output, and 4 states. It acts like a
    2-bit counter, where the output goes high only if the FSM is in the
final state.
When the direction pin is low, the counter counts up; if it is high, the
counter counts down.
Parameters
----------
direction_logic_value : int
The logic value of the direction pin.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output pattern corresponding to the direction value.
list
The state bit0 pattern corresponding to the direction value.
list
The state bit1 pattern corresponding to the direction value.
"""
out, rst, direction = list(pin_dict.keys())[0:3]
fsm_spec_4_state = {'inputs': [('rst', rst), ('direction', direction)],
'outputs': [('test', out)],
'states': ['S0', 'S1', 'S2', 'S3'],
'transitions': [['00', 'S0', 'S1', '0'],
['01', 'S0', 'S3', '0'],
['00', 'S1', 'S2', '0'],
['01', 'S1', 'S0', '0'],
['00', 'S2', 'S3', '0'],
['01', 'S2', 'S1', '0'],
['00', 'S3', 'S0', '1'],
['01', 'S3', 'S2', '1'],
['1-', '*', 'S0', '']]}
if not direction_logic_value:
output_pattern = [0, 0, 0, 1]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 0, 1, 1]
else:
output_pattern = [0, 1, 0, 0]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 1, 1, 0]
return fsm_spec_4_state, \
output_pattern, state_bit0_pattern, state_bit1_pattern
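# How to read one transition entry above (a descriptive note based on this
# spec and on how the tests drive the pins, not on library documentation):
# each row is [input_bits, current_state, next_state, output_bits], so
# ['01', 'S0', 'S3', '0'] means "when rst=0 and direction=1, move from S0
# to S3 with the 'test' output low", and the wildcard row
# ['1-', '*', 'S0', ''] forces a return to S0 from any state while rst is
# high.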
def build_fsm_spec_random(num_states):
"""Build an FSM spec with the specified number of states.
    The FSM spec uses only a single input and a single output. As a side
    product, a list of output patterns is also returned.
Parameters
----------
num_states : int
The number of states of the FSM.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin, output_pin = list(pin_dict.keys())[0:2]
if num_states == 1:
return {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': ['S0'],
'transitions': [['1', '*', 'S0', '']]}, None
else:
fsm_spec_state = {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': [],
'transitions': [['1', '*', 'S0', '']]}
output_pattern_list = list()
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i+1) % num_states)
fsm_spec_state['states'] += [current_state]
output_pattern = '{}'.format(randint(0, 1))
transition = ['0', current_state, next_state, output_pattern]
fsm_spec_state['transitions'] += [transition]
output_pattern_list.append(int(output_pattern))
return fsm_spec_state, output_pattern_list
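# Note: while rst stays low this FSM simply walks S0 -> S1 -> ... and wraps,
# so the 'test' output repeats output_pattern_list with period num_states;
# test_fsm_num_states2 below relies on that periodicity when it tiles the
# pattern into a golden array.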
def build_fsm_spec_max_in_out():
"""Build an FSM spec using a maximum number of inputs and outputs.
The returned FSM spec has a maximum number of inputs and
outputs. At the same time, the largest available number of
states will be implemented. For example, on PYNQ-Z1, if
FSM_MAX_INPUT_BITS = 8, and FSM_MAX_STATE_INPUT_BITS = 13, we will
implement 2**(13-8)-1 = 31 states. This is the largest number of states
available for this setup, since there is always 1 dummy state that has
to be reserved.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
output_pins = list(pin_dict.keys())[FSM_MAX_INPUT_BITS:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': [['1' * len(input_pins), '*', 'S0', '']]}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
for i in range(len(input_pins)):
fsm_spec_inout['inputs'].append(('input{}'.format(i),
input_pins[i]))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['0' * len(input_pins), current_state, next_state,
output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
def build_fsm_spec_free_run():
"""Build a spec that results in a free-running FSM.
This will return an FSM spec with no given inputs.
In this case, the FSM is a free running state machine.
    The maximum number of states is deployed.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin = list(pin_dict.keys())[0]
output_pins = list(pin_dict.keys())[1:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': []}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = FSM_MAX_NUM_STATES
fsm_spec_inout['inputs'].append(('input0', input_pin))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['-', current_state, next_state, output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
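# Note the '-' input value in each transition above: the single mandatory
# input pin is treated as don't-care, so every state advances
# unconditionally and the machine free-runs through all FSM_MAX_NUM_STATES
# states.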
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_samples():
"""Test for the Finite State Machine Generator class.
In this test, the pattern generated by the FSM will be compared with the
    one specified. We will test a minimum number of samples (one FSM period),
and a maximum number of samples. 10MHz and 100MHz clocks are tested
for each case.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, _, _ = build_fsm_spec_4_state(1)
fsm_period = len(fsm_spec_4_state['states'])
for num_samples in [fsm_period, MAX_NUM_TRACE_SAMPLES]:
test_tile = np.array(output_pattern)
golden_test_array = np.tile(test_tile, ceil(num_samples / 4))
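        # np.tile repeats the 4-sample FSM pattern enough times to cover
        # num_samples; ceil() may overshoot by up to 3 samples, which is why
        # the assertion below compares against golden_test_array[:num_samples].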
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
assert fsm_generator.status == 'RESET'
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=num_samples)
fsm_generator.setup(fsm_spec_4_state,
frequency_mhz=fsm_frequency_mhz)
assert fsm_generator.status == 'READY'
assert 'bram_data_buf' not in \
fsm_generator.logictools_controller.buffers, \
'bram_data_buf is not freed after use.'
fsm_generator.run()
assert fsm_generator.status == 'RUNNING'
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
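            # 'test_string' is a WaveDrom-style wave lane ('l'/'h' levels,
            # with '.' repeating the previous level); wave_to_bitstring
            # flattens it into a plain '0'/'1' string and bitstring_to_int
            # yields one int per sample (semantics assumed from
            # pynq.lib.logictools.waveform).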
assert np.array_equal(test_array,
golden_test_array[:num_samples]), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
assert fsm_generator.status == 'READY'
fsm_generator.reset()
assert fsm_generator.status == 'RESET'
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_state_bits():
"""Test for the Finite State Machine Generator class.
This test is similar to the first test, but in this test,
we will test the case when the state bits are also used as outputs.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, \
state_bit0_pattern, state_bit1_pattern = build_fsm_spec_4_state(0)
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern)
golden_state_bit0_array = np.array(state_bit0_pattern)
golden_state_bit1_array = np.array(state_bit1_pattern)
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
fsm_generator.run()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_step():
"""Test for the Finite State Machine Generator class.
This test is similar to the above test, but in this test,
we will test the `step()` method, and ask users to change the input
logic values in the middle of the test.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("")
fsm_spec_4_state, output_pattern_up, \
state_bit0_pattern_up, \
state_bit1_pattern_up = build_fsm_spec_4_state(0)
_, output_pattern_down, \
state_bit0_pattern_down, \
state_bit1_pattern_down = build_fsm_spec_4_state(1)
output_pattern_down.append(output_pattern_down.pop(0))
state_bit0_pattern_down.append(state_bit0_pattern_down.pop(0))
state_bit1_pattern_down.append(state_bit1_pattern_down.pop(0))
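    # Each pop(0)/append pair rotates a down-count pattern left by one
    # element ([a, b, c, d] -> [b, c, d, a]); the golden arrays built below
    # assume the downward trace continues from the state reached after the
    # upward steps rather than restarting from S0.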
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern_up +
output_pattern_down[1:])
golden_state_bit0_array = np.array(state_bit0_pattern_up +
state_bit0_pattern_down[1:])
golden_state_bit1_array = np.array(state_bit1_pattern_up +
state_bit1_pattern_down[1:])
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
print("Connect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_up)-1):
fsm_generator.step()
print("Connect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_down)):
fsm_generator.step()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_no_trace():
"""Test for the Finite State Machine Generator class.
This is similar to the first test, but in this test,
we will test the case when no analyzer is specified.
"""
ol.download()
fsm_spec_4_state, _, _, _ = build_fsm_spec_4_state(0)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=False)
fsm_generator.setup(fsm_spec_4_state)
fsm_generator.run()
exception_raised = False
try:
fsm_generator.show_waveform()
except ValueError:
exception_raised = True
assert exception_raised, 'Should raise exception for show_waveform().'
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states1():
"""Test for the Finite State Machine Generator class.
    This test will check 1 and (FSM_MAX_NUM_STATES + 1) states.
These cases should raise exceptions. For these tests, we use the minimum
number of input and output pins.
"""
ol.download()
fsm_generator = None
exception_raised = False
fsm_spec_less_than_min_state, _ = build_fsm_spec_random(
FSM_MIN_NUM_STATES - 1)
fsm_spec_more_than_max_state, _ = build_fsm_spec_random(
FSM_MAX_NUM_STATES + 1)
for fsm_spec in [fsm_spec_less_than_min_state,
fsm_spec_more_than_max_state]:
num_states = len(fsm_spec['states'])
try:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec)
except ValueError:
exception_raised = True
assert exception_raised, \
'Should raise exception when ' \
'there are {} states in the FSM.'.format(num_states)
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states2():
"""Test for the Finite State Machine Generator class.
    This test will check 2 and FSM_MAX_NUM_STATES states.
These cases should be able to pass random tests.
For these tests, we use the minimum number of input and output pins.
"""
ol.download()
input_pin = list(pin_dict.keys())[0]
print("\nConnect {} to GND, and disconnect other pins.".format(input_pin))
input("Hit enter after done ...")
for num_states in [2, FSM_MAX_NUM_STATES]:
fsm_spec, test_pattern = build_fsm_spec_random(num_states)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec, frequency_mhz=100)
fsm_generator.run()
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
period = num_states
test_tile = np.array(test_pattern)
golden_test_array = np.tile(test_tile,
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_array,
golden_test_array[:MAX_NUM_TRACE_SAMPLES]), \
'Analysis not matching the generated pattern.'
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
# MASKED: test_fsm_max_in_out function (lines 575-623)
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_free_run():
"""Test for the Finite State Machine Generator class.
This will examine a special scenario where no inputs are given.
In this case, the FSM is a free running state machine. Since the FSM
specification requires at least 1 input pin to be specified, 1 pin can
be used as `don't care` input, while all the other pins are used as
    outputs. The maximum number of states is deployed.
"""
ol.download()
print("\nDisconnect all the pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_free_run()
period = FSM_MAX_NUM_STATES
num_output_pins = interface_width - 1
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=period)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = test_patterns
for i in range(num_output_pins):
assert np.array_equal(test_arrays[i], golden_arrays[i]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
|
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_max_in_out():
"""Test for the Finite State Machine Generator class.
    This test covers the case when the maximum number of inputs and
outputs are used. At the same time, the largest available number of
states will be implemented.
"""
ol.download()
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
print("\nConnect {} to GND.".format(input_pins))
print("Disconnect all other pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_max_in_out()
period = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
num_output_pins = interface_width - FSM_MAX_INPUT_BITS
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = [[] for _ in range(num_output_pins)]
for i in range(num_output_pins):
golden_arrays[i] = np.tile(test_patterns[i],
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_arrays[i],
golden_arrays[i][:MAX_NUM_TRACE_SAMPLES]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
| 575
| 623
|
# Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from random import randint
from math import ceil
import numpy as np
import pytest
from pynq import Overlay
from pynq.tests.util import user_answer_yes
from pynq.lib.logictools.waveform import bitstring_to_int
from pynq.lib.logictools.waveform import wave_to_bitstring
from pynq.lib.logictools import FSMGenerator
from pynq.lib.logictools import ARDUINO
from pynq.lib.logictools import PYNQZ1_LOGICTOOLS_SPECIFICATION
from pynq.lib.logictools import MAX_NUM_TRACE_SAMPLES
from pynq.lib.logictools import FSM_MIN_NUM_STATES
from pynq.lib.logictools import FSM_MAX_NUM_STATES
from pynq.lib.logictools import FSM_MAX_INPUT_BITS
from pynq.lib.logictools import FSM_MAX_STATE_INPUT_BITS
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "pynq_support@xilinx.com"
try:
ol = Overlay('logictools.bit', download=False)
flag0 = True
except IOError:
flag0 = False
flag1 = user_answer_yes("\nTest Finite State Machine (FSM) generator?")
if flag1:
mb_info = ARDUINO
flag = flag0 and flag1
pin_dict = PYNQZ1_LOGICTOOLS_SPECIFICATION['traceable_outputs']
interface_width = PYNQZ1_LOGICTOOLS_SPECIFICATION['interface_width']
def build_fsm_spec_4_state(direction_logic_value):
"""Build an FSM spec with 4 states.
The FSM built has 2 inputs, 1 output, and 4 states. It acts like a
    2-bit counter, where the output goes high only if the FSM is in the
final state.
When the direction pin is low, the counter counts up; if it is high, the
counter counts down.
Parameters
----------
direction_logic_value : int
The logic value of the direction pin.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output pattern corresponding to the direction value.
list
The state bit0 pattern corresponding to the direction value.
list
The state bit1 pattern corresponding to the direction value.
"""
out, rst, direction = list(pin_dict.keys())[0:3]
fsm_spec_4_state = {'inputs': [('rst', rst), ('direction', direction)],
'outputs': [('test', out)],
'states': ['S0', 'S1', 'S2', 'S3'],
'transitions': [['00', 'S0', 'S1', '0'],
['01', 'S0', 'S3', '0'],
['00', 'S1', 'S2', '0'],
['01', 'S1', 'S0', '0'],
['00', 'S2', 'S3', '0'],
['01', 'S2', 'S1', '0'],
['00', 'S3', 'S0', '1'],
['01', 'S3', 'S2', '1'],
['1-', '*', 'S0', '']]}
if not direction_logic_value:
output_pattern = [0, 0, 0, 1]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 0, 1, 1]
else:
output_pattern = [0, 1, 0, 0]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 1, 1, 0]
return fsm_spec_4_state, \
output_pattern, state_bit0_pattern, state_bit1_pattern
def build_fsm_spec_random(num_states):
"""Build an FSM spec with the specified number of states.
    The FSM spec uses only a single input and a single output. As a side
    product, a list of output patterns is also returned.
Parameters
----------
num_states : int
The number of states of the FSM.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin, output_pin = list(pin_dict.keys())[0:2]
if num_states == 1:
return {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': ['S0'],
'transitions': [['1', '*', 'S0', '']]}, None
else:
fsm_spec_state = {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': [],
'transitions': [['1', '*', 'S0', '']]}
output_pattern_list = list()
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i+1) % num_states)
fsm_spec_state['states'] += [current_state]
output_pattern = '{}'.format(randint(0, 1))
transition = ['0', current_state, next_state, output_pattern]
fsm_spec_state['transitions'] += [transition]
output_pattern_list.append(int(output_pattern))
return fsm_spec_state, output_pattern_list
def build_fsm_spec_max_in_out():
"""Build an FSM spec using a maximum number of inputs and outputs.
The returned FSM spec has a maximum number of inputs and
outputs. At the same time, the largest available number of
states will be implemented. For example, on PYNQ-Z1, if
FSM_MAX_INPUT_BITS = 8, and FSM_MAX_STATE_INPUT_BITS = 13, we will
implement 2**(13-8)-1 = 31 states. This is the largest number of states
available for this setup, since there is always 1 dummy state that has
to be reserved.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
output_pins = list(pin_dict.keys())[FSM_MAX_INPUT_BITS:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': [['1' * len(input_pins), '*', 'S0', '']]}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
for i in range(len(input_pins)):
fsm_spec_inout['inputs'].append(('input{}'.format(i),
input_pins[i]))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['0' * len(input_pins), current_state, next_state,
output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
def build_fsm_spec_free_run():
"""Build a spec that results in a free-running FSM.
This will return an FSM spec with no given inputs.
In this case, the FSM is a free running state machine.
    The maximum number of states is deployed.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin = list(pin_dict.keys())[0]
output_pins = list(pin_dict.keys())[1:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': []}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = FSM_MAX_NUM_STATES
fsm_spec_inout['inputs'].append(('input0', input_pin))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['-', current_state, next_state, output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_samples():
"""Test for the Finite State Machine Generator class.
In this test, the pattern generated by the FSM will be compared with the
    one specified. We will test a minimum number of samples (one FSM period),
and a maximum number of samples. 10MHz and 100MHz clocks are tested
for each case.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, _, _ = build_fsm_spec_4_state(1)
fsm_period = len(fsm_spec_4_state['states'])
for num_samples in [fsm_period, MAX_NUM_TRACE_SAMPLES]:
test_tile = np.array(output_pattern)
golden_test_array = np.tile(test_tile, ceil(num_samples / 4))
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
assert fsm_generator.status == 'RESET'
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=num_samples)
fsm_generator.setup(fsm_spec_4_state,
frequency_mhz=fsm_frequency_mhz)
assert fsm_generator.status == 'READY'
assert 'bram_data_buf' not in \
fsm_generator.logictools_controller.buffers, \
'bram_data_buf is not freed after use.'
fsm_generator.run()
assert fsm_generator.status == 'RUNNING'
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
assert np.array_equal(test_array,
golden_test_array[:num_samples]), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
assert fsm_generator.status == 'READY'
fsm_generator.reset()
assert fsm_generator.status == 'RESET'
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_state_bits():
"""Test for the Finite State Machine Generator class.
This test is similar to the first test, but in this test,
we will test the case when the state bits are also used as outputs.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, \
state_bit0_pattern, state_bit1_pattern = build_fsm_spec_4_state(0)
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern)
golden_state_bit0_array = np.array(state_bit0_pattern)
golden_state_bit1_array = np.array(state_bit1_pattern)
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
fsm_generator.run()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_step():
"""Test for the Finite State Machine Generator class.
This test is similar to the above test, but in this test,
we will test the `step()` method, and ask users to change the input
logic values in the middle of the test.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("")
fsm_spec_4_state, output_pattern_up, \
state_bit0_pattern_up, \
state_bit1_pattern_up = build_fsm_spec_4_state(0)
_, output_pattern_down, \
state_bit0_pattern_down, \
state_bit1_pattern_down = build_fsm_spec_4_state(1)
output_pattern_down.append(output_pattern_down.pop(0))
state_bit0_pattern_down.append(state_bit0_pattern_down.pop(0))
state_bit1_pattern_down.append(state_bit1_pattern_down.pop(0))
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern_up +
output_pattern_down[1:])
golden_state_bit0_array = np.array(state_bit0_pattern_up +
state_bit0_pattern_down[1:])
golden_state_bit1_array = np.array(state_bit1_pattern_up +
state_bit1_pattern_down[1:])
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
print("Connect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_up)-1):
fsm_generator.step()
print("Connect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_down)):
fsm_generator.step()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_no_trace():
"""Test for the Finite State Machine Generator class.
This is similar to the first test, but in this test,
we will test the case when no analyzer is specified.
"""
ol.download()
fsm_spec_4_state, _, _, _ = build_fsm_spec_4_state(0)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=False)
fsm_generator.setup(fsm_spec_4_state)
fsm_generator.run()
exception_raised = False
try:
fsm_generator.show_waveform()
except ValueError:
exception_raised = True
assert exception_raised, 'Should raise exception for show_waveform().'
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states1():
"""Test for the Finite State Machine Generator class.
    This test will check 1 and (FSM_MAX_NUM_STATES + 1) states.
These cases should raise exceptions. For these tests, we use the minimum
number of input and output pins.
"""
ol.download()
fsm_generator = None
exception_raised = False
fsm_spec_less_than_min_state, _ = build_fsm_spec_random(
FSM_MIN_NUM_STATES - 1)
fsm_spec_more_than_max_state, _ = build_fsm_spec_random(
FSM_MAX_NUM_STATES + 1)
for fsm_spec in [fsm_spec_less_than_min_state,
fsm_spec_more_than_max_state]:
num_states = len(fsm_spec['states'])
try:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec)
except ValueError:
exception_raised = True
assert exception_raised, \
'Should raise exception when ' \
'there are {} states in the FSM.'.format(num_states)
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states2():
"""Test for the Finite State Machine Generator class.
    This test will check 2 and FSM_MAX_NUM_STATES states.
These cases should be able to pass random tests.
For these tests, we use the minimum number of input and output pins.
"""
ol.download()
input_pin = list(pin_dict.keys())[0]
print("\nConnect {} to GND, and disconnect other pins.".format(input_pin))
input("Hit enter after done ...")
for num_states in [2, FSM_MAX_NUM_STATES]:
fsm_spec, test_pattern = build_fsm_spec_random(num_states)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec, frequency_mhz=100)
fsm_generator.run()
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
period = num_states
test_tile = np.array(test_pattern)
golden_test_array = np.tile(test_tile,
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_array,
golden_test_array[:MAX_NUM_TRACE_SAMPLES]), \
'Analysis not matching the generated pattern.'
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_max_in_out():
"""Test for the Finite State Machine Generator class.
    This test covers the case when the maximum number of inputs and
outputs are used. At the same time, the largest available number of
states will be implemented.
"""
ol.download()
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
print("\nConnect {} to GND.".format(input_pins))
print("Disconnect all other pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_max_in_out()
period = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
num_output_pins = interface_width - FSM_MAX_INPUT_BITS
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = [[] for _ in range(num_output_pins)]
for i in range(num_output_pins):
golden_arrays[i] = np.tile(test_patterns[i],
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_arrays[i],
golden_arrays[i][:MAX_NUM_TRACE_SAMPLES]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_free_run():
"""Test for the Finite State Machine Generator class.
This will examine a special scenario where no inputs are given.
In this case, the FSM is a free running state machine. Since the FSM
specification requires at least 1 input pin to be specified, 1 pin can
be used as `don't care` input, while all the other pins are used as
    outputs. The maximum number of states is deployed.
"""
ol.download()
print("\nDisconnect all the pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_free_run()
period = FSM_MAX_NUM_STATES
num_output_pins = interface_width - 1
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=period)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = test_patterns
for i in range(num_output_pins):
assert np.array_equal(test_arrays[i], golden_arrays[i]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
|
modify_issue
|
Modify existing issue (PRs are issues)
Arguments:
labels: list of labels to assign to issue
title: new title
body: new body
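A minimal usage sketch (hypothetical requester string and PR number; it uses
the AiohttpGitHubHandler subclass defined below, with an empty token so that
login() skips the /user call, and dry_run=True so nothing is actually
modified):

import asyncio
import aiohttp

async def main():
    async with aiohttp.ClientSession() as session:
        handler = AiohttpGitHubHandler("", dry_run=True)
        await handler.login(session, "my-bot/0.1")
        # PRs are issues, so this can relabel/retitle a PR as well
        await handler.modify_issue(42, labels=["autobump"], title="New title")

asyncio.run(main())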
|
"""Highlevel API for managing PRs on Github"""
import abc
import logging
from copy import copy
from enum import Enum
from typing import Any, Dict, List, Optional
import gidgethub
import gidgethub.aiohttp
import aiohttp
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
#: State for Github Issues
IssueState = Enum("IssueState", "open closed all") # pylint: disable=invalid-name
class GitHubHandler:
"""Handles interaction with GitHub
Arguments:
token: OAUTH token granting permissions to GH
dry_run: Don't actually modify things if set
to_user: Target User/Org for PRs
to_repo: Target repository within **to_user**
"""
PULLS = "/repos/{user}/{repo}/pulls{/number}{?head,base,state}"
ISSUES = "/repos/{user}/{repo}/issues{/number}"
ORG_MEMBERS = "/orgs/{user}/members{/username}"
STATE = IssueState
def __init__(self, token: str,
dry_run: bool = False,
to_user: str = "bioconda",
                 to_repo: str = "bioconda-recipes") -> None:
self.token = token
self.dry_run = dry_run
self.var_default = {'user': to_user,
'repo': to_repo}
# filled in by login():
self.api: gidgethub.abc.GitHubAPI = None
self.username: str = None
@abc.abstractmethod
def create_api_object(self, *args, **kwargs):
"""Create API object"""
def get_file_relurl(self, path: str, branch_name: str = "master") -> str:
"""Format domain relative url for **path** on **branch_name**"""
return "/{user}/{repo}/tree/{branch_name}/{path}".format(
branch_name=branch_name, path=path, **self.var_default)
async def login(self, *args, **kwargs):
"""Log into API (fills `self.username`)"""
self.create_api_object(*args, **kwargs)
if not self.token:
self.username = "UNKNOWN [no token]"
else:
user = await self.api.getitem("/user")
self.username = user["login"]
async def is_member(self, username) -> bool:
"""Check if **username** is member of current org"""
if not username:
return False
var_data = copy(self.var_default)
var_data['username'] = username
try:
await self.api.getitem(self.ORG_MEMBERS, var_data)
except gidgethub.BadRequest:
logger.debug("User %s is not a member of %s", username, var_data['user'])
return False
logger.debug("User %s IS a member of %s", username, var_data['user'])
return True
# pylint: disable=too-many-arguments
async def get_prs(self,
from_branch: Optional[str] = None,
from_user: Optional[str] = None,
to_branch: Optional[str] = None,
number: Optional[int] = None,
state: Optional[IssueState] = None) -> List[Dict[Any, Any]]:
"""Retrieve list of PRs matching parameters
Arguments:
from_branch: Name of branch from which PR asks to pull
            from_user: Name of user/org from which to pull
(default: from auth)
to_branch: Name of branch into which to pull (default: master)
            number: PR number
            state: Issue state used to filter PRs (open, closed or all)
"""
var_data = copy(self.var_default)
if not from_user:
from_user = self.username
if from_branch:
if from_user:
var_data['head'] = f"{from_user}:{from_branch}"
else:
var_data['head'] = from_branch
if to_branch:
var_data['base'] = to_branch
if number:
var_data['number'] = str(number)
if state:
var_data['state'] = state.name.lower()
return await self.api.getitem(self.PULLS, var_data)
# pylint: disable=too-many-arguments
async def create_pr(self, title: str,
from_branch: Optional[str] = None,
from_user: Optional[str] = None,
to_branch: Optional[str] = "master",
body: Optional[str] = None,
maintainer_can_modify: bool = True) -> Dict[Any, Any]:
"""Create new PR
Arguments:
title: Title of new PR
from_branch: Name of branch from which PR asks to pull
            from_user: Name of user/org from which to pull
to_branch: Name of branch into which to pull (default: master)
body: Body text of PR
maintainer_can_modify: Whether to allow maintainer to modify from_branch
"""
var_data = copy(self.var_default)
if not from_user:
from_user = self.username
data: Dict[str, Any] = {'title': title,
'body': '',
'maintainer_can_modify': maintainer_can_modify}
if body:
data['body'] += body
if from_branch:
if from_user and from_user != self.username:
data['head'] = f"{from_user}:{from_branch}"
else:
data['head'] = from_branch
if to_branch:
data['base'] = to_branch
logger.debug("PR data %s", data)
if self.dry_run:
logger.info("Would create PR '%s'", title)
return {'number': -1}
logger.info("Creating PR '%s'", title)
return await self.api.post(self.PULLS, var_data, data=data)
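    # Note: the PULLS/ISSUES/ORG_MEMBERS constants above are RFC 6570 URI
    # templates; gidgethub expands them from the url_vars dict passed as the
    # second argument, e.g. {'user': 'bioconda', 'repo': 'bioconda-recipes',
    # 'state': 'open'} turns PULLS into
    # '/repos/bioconda/bioconda-recipes/pulls?state=open', while unset
    # optional parts such as {/number} simply drop out.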
# MASKED: modify_issue function (lines 157-189)
class AiohttpGitHubHandler(GitHubHandler):
"""GitHubHandler using Aiohttp for HTTP requests
Arguments:
session: Aiohttp Client Session object
requester: Identify self (e.g. user agent)
"""
def create_api_object(self, session: aiohttp.ClientSession,
requester: str, *args, **kwargs) -> None:
self.api = gidgethub.aiohttp.GitHubAPI(
session, requester, oauth_token=self.token
)
|
async def modify_issue(self, number: int,
labels: Optional[List[str]] = None,
title: Optional[str] = None,
body: Optional[str] = None) -> Dict[Any, Any]:
"""Modify existing issue (PRs are issues)
Arguments:
labels: list of labels to assign to issue
title: new title
body: new body
"""
var_data = copy(self.var_default)
var_data["number"] = str(number)
data: Dict[str, Any] = {}
if labels:
data['labels'] = labels
if title:
data['title'] = title
if body:
data['body'] = body
if self.dry_run:
logger.info("Would modify PR %s", number)
if title:
logger.info("New title: %s", title)
if labels:
logger.info("New labels: %s", labels)
if body:
logger.info("New Body:\n%s\n", body)
return {'number': number}
logger.info("Modifying PR %s", number)
return await self.api.patch(self.ISSUES, var_data, data=data)
| 157
| 189
|
"""Highlevel API for managing PRs on Github"""
import abc
import logging
from copy import copy
from enum import Enum
from typing import Any, Dict, List, Optional
import gidgethub
import gidgethub.aiohttp
import aiohttp
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
#: State for Github Issues
IssueState = Enum("IssueState", "open closed all") # pylint: disable=invalid-name
class GitHubHandler:
"""Handles interaction with GitHub
Arguments:
token: OAUTH token granting permissions to GH
dry_run: Don't actually modify things if set
to_user: Target User/Org for PRs
to_repo: Target repository within **to_user**
"""
PULLS = "/repos/{user}/{repo}/pulls{/number}{?head,base,state}"
ISSUES = "/repos/{user}/{repo}/issues{/number}"
ORG_MEMBERS = "/orgs/{user}/members{/username}"
STATE = IssueState
def __init__(self, token: str,
dry_run: bool = False,
to_user: str = "bioconda",
                 to_repo: str = "bioconda-recipes") -> None:
self.token = token
self.dry_run = dry_run
self.var_default = {'user': to_user,
'repo': to_repo}
# filled in by login():
self.api: gidgethub.abc.GitHubAPI = None
self.username: str = None
@abc.abstractmethod
def create_api_object(self, *args, **kwargs):
"""Create API object"""
def get_file_relurl(self, path: str, branch_name: str = "master") -> str:
"""Format domain relative url for **path** on **branch_name**"""
return "/{user}/{repo}/tree/{branch_name}/{path}".format(
branch_name=branch_name, path=path, **self.var_default)
async def login(self, *args, **kwargs):
"""Log into API (fills `self.username`)"""
self.create_api_object(*args, **kwargs)
if not self.token:
self.username = "UNKNOWN [no token]"
else:
user = await self.api.getitem("/user")
self.username = user["login"]
async def is_member(self, username) -> bool:
"""Check if **username** is member of current org"""
if not username:
return False
var_data = copy(self.var_default)
var_data['username'] = username
try:
await self.api.getitem(self.ORG_MEMBERS, var_data)
except gidgethub.BadRequest:
logger.debug("User %s is not a member of %s", username, var_data['user'])
return False
logger.debug("User %s IS a member of %s", username, var_data['user'])
return True
# pylint: disable=too-many-arguments
async def get_prs(self,
from_branch: Optional[str] = None,
from_user: Optional[str] = None,
to_branch: Optional[str] = None,
number: Optional[int] = None,
state: Optional[IssueState] = None) -> List[Dict[Any, Any]]:
"""Retrieve list of PRs matching parameters
Arguments:
from_branch: Name of branch from which PR asks to pull
            from_user: Name of user/org from which to pull
(default: from auth)
to_branch: Name of branch into which to pull (default: master)
            number: PR number
            state: Issue state used to filter PRs (open, closed or all)
"""
var_data = copy(self.var_default)
if not from_user:
from_user = self.username
if from_branch:
if from_user:
var_data['head'] = f"{from_user}:{from_branch}"
else:
var_data['head'] = from_branch
if to_branch:
var_data['base'] = to_branch
if number:
var_data['number'] = str(number)
if state:
var_data['state'] = state.name.lower()
return await self.api.getitem(self.PULLS, var_data)
# pylint: disable=too-many-arguments
async def create_pr(self, title: str,
from_branch: Optional[str] = None,
from_user: Optional[str] = None,
to_branch: Optional[str] = "master",
body: Optional[str] = None,
maintainer_can_modify: bool = True) -> Dict[Any, Any]:
"""Create new PR
Arguments:
title: Title of new PR
from_branch: Name of branch from which PR asks to pull
          from_user: Name of user/org from which to pull
to_branch: Name of branch into which to pull (default: master)
body: Body text of PR
maintainer_can_modify: Whether to allow maintainer to modify from_branch
"""
var_data = copy(self.var_default)
if not from_user:
from_user = self.username
data: Dict[str, Any] = {'title': title,
'body': '',
'maintainer_can_modify': maintainer_can_modify}
if body:
data['body'] += body
if from_branch:
if from_user and from_user != self.username:
data['head'] = f"{from_user}:{from_branch}"
else:
data['head'] = from_branch
if to_branch:
data['base'] = to_branch
logger.debug("PR data %s", data)
if self.dry_run:
logger.info("Would create PR '%s'", title)
return {'number': -1}
logger.info("Creating PR '%s'", title)
return await self.api.post(self.PULLS, var_data, data=data)
async def modify_issue(self, number: int,
labels: Optional[List[str]] = None,
title: Optional[str] = None,
body: Optional[str] = None) -> Dict[Any, Any]:
"""Modify existing issue (PRs are issues)
Arguments:
          number: Number of issue/PR to modify
          labels: list of labels to assign to issue
title: new title
body: new body
"""
var_data = copy(self.var_default)
var_data["number"] = str(number)
data: Dict[str, Any] = {}
if labels:
data['labels'] = labels
if title:
data['title'] = title
if body:
data['body'] = body
if self.dry_run:
logger.info("Would modify PR %s", number)
if title:
logger.info("New title: %s", title)
if labels:
logger.info("New labels: %s", labels)
if body:
logger.info("New Body:\n%s\n", body)
return {'number': number}
logger.info("Modifying PR %s", number)
return await self.api.patch(self.ISSUES, var_data, data=data)
class AiohttpGitHubHandler(GitHubHandler):
"""GitHubHandler using Aiohttp for HTTP requests
Arguments:
session: Aiohttp Client Session object
requester: Identify self (e.g. user agent)
"""
def create_api_object(self, session: aiohttp.ClientSession,
requester: str, *args, **kwargs) -> None:
self.api = gidgethub.aiohttp.GitHubAPI(
session, requester, oauth_token=self.token
)
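# Illustrative usage sketch; the requester string, issue number and label
# below are placeholders, not values defined by this module.
async def _example_usage(token: str) -> None:
    async with aiohttp.ClientSession() as session:
        handler = AiohttpGitHubHandler(token, dry_run=True)
        await handler.login(session, "example-bot/0.1")
        # List open PRs pulling into master
        prs = await handler.get_prs(to_branch="master", state=IssueState.open)
        logger.info("Found %s open PRs", len(prs))
        # With dry_run=True this only logs the change; nothing is modified:
        await handler.modify_issue(1, labels=["autobump"])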
|
check_specs_against_mirrors
|
Check all the given specs against buildcaches on the given mirrors and
determine if any of the specs need to be rebuilt.  Reasons for needing to
rebuild include: the binary cache for the spec isn't present on a mirror, or
it is present but the full_hash has changed since the spec was last built.
Arguments:
    mirrors (dict): Mirrors to check against
    specs (iterable): Specs to check against mirrors
    output_file (string): Path to output file to be written. If provided,
        mirrors with missing or out-of-date specs will be formatted as a
        JSON object and written to this file.
    rebuild_on_errors (boolean): Treat any errors encountered while
        checking specs as a signal to rebuild the package.
Returns: 1 if any spec was out-of-date on any mirror, 0 otherwise.
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import codecs
import os
import re
import tarfile
import shutil
import tempfile
import hashlib
import glob
import platform
from contextlib import closing
import ruamel.yaml as yaml
import json
from six.moves.urllib.error import URLError
import llnl.util.tty as tty
from llnl.util.filesystem import mkdirp
import spack.cmd
import spack.config as config
import spack.fetch_strategy as fs
import spack.util.gpg
import spack.relocate as relocate
import spack.util.spack_yaml as syaml
import spack.mirror
import spack.util.url as url_util
import spack.util.web as web_util
from spack.spec import Spec
from spack.stage import Stage
from spack.util.gpg import Gpg
import spack.architecture as architecture
import spack.build_environment
import spack.error
import spack.paths
import spack.store
_build_cache_relative_path = 'build_cache'
BUILD_CACHE_INDEX_TEMPLATE = '''
<html>
<head>
<title>{title}</title>
</head>
<body>
<ul>
{path_list}
</ul>
</body>
</html>
'''
BUILD_CACHE_INDEX_ENTRY_TEMPLATE = ' <li><a href="{path}">{path}</a></li>'
class NoOverwriteException(spack.error.SpackError):
"""
Raised when a file exists and must be overwritten.
"""
def __init__(self, file_path):
err_msg = "\n%s\nexists\n" % file_path
err_msg += "Use -f option to overwrite."
super(NoOverwriteException, self).__init__(err_msg)
class NoGpgException(spack.error.SpackError):
"""
Raised when gpg2 is not in PATH
"""
def __init__(self, msg):
super(NoGpgException, self).__init__(msg)
class NoKeyException(spack.error.SpackError):
"""
Raised when gpg has no default key added.
"""
def __init__(self, msg):
super(NoKeyException, self).__init__(msg)
class PickKeyException(spack.error.SpackError):
"""
Raised when multiple keys can be used to sign.
"""
def __init__(self, keys):
err_msg = "Multiple keys available for signing\n%s\n" % keys
err_msg += "Use spack buildcache create -k <key hash> to pick a key."
super(PickKeyException, self).__init__(err_msg)
class NoVerifyException(spack.error.SpackError):
"""
Raised if file fails signature verification.
"""
pass
class NoChecksumException(spack.error.SpackError):
"""
Raised if file fails checksum verification.
"""
pass
class NewLayoutException(spack.error.SpackError):
"""
Raised if directory layout is different from buildcache.
"""
def __init__(self, msg):
super(NewLayoutException, self).__init__(msg)
def build_cache_relative_path():
return _build_cache_relative_path
def build_cache_prefix(prefix):
return os.path.join(prefix, build_cache_relative_path())
def buildinfo_file_name(prefix):
"""
Filename of the binary package meta-data file
"""
name = os.path.join(prefix, ".spack/binary_distribution")
return name
def read_buildinfo_file(prefix):
"""
Read buildinfo file
"""
filename = buildinfo_file_name(prefix)
with open(filename, 'r') as inputfile:
content = inputfile.read()
buildinfo = yaml.load(content)
return buildinfo
def write_buildinfo_file(spec, workdir, rel=False):
"""
Create a cache file containing information
required for the relocation
"""
prefix = spec.prefix
text_to_relocate = []
binary_to_relocate = []
link_to_relocate = []
blacklist = (".spack", "man")
prefix_to_hash = dict()
prefix_to_hash[str(spec.package.prefix)] = spec.dag_hash()
deps = spack.build_environment.get_rpath_deps(spec.package)
for d in deps:
prefix_to_hash[str(d.prefix)] = d.dag_hash()
    # Do this during tarball creation to save time when the tarball is unpacked.
# Used by make_package_relative to determine binaries to change.
for root, dirs, files in os.walk(prefix, topdown=True):
dirs[:] = [d for d in dirs if d not in blacklist]
for filename in files:
path_name = os.path.join(root, filename)
m_type, m_subtype = relocate.mime_type(path_name)
if os.path.islink(path_name):
link = os.readlink(path_name)
if os.path.isabs(link):
# Relocate absolute links into the spack tree
if link.startswith(spack.store.layout.root):
rel_path_name = os.path.relpath(path_name, prefix)
link_to_relocate.append(rel_path_name)
else:
msg = 'Absolute link %s to %s ' % (path_name, link)
msg += 'outside of prefix %s ' % prefix
msg += 'should not be relocated.'
tty.warn(msg)
if relocate.needs_binary_relocation(m_type, m_subtype):
if not filename.endswith('.o'):
rel_path_name = os.path.relpath(path_name, prefix)
binary_to_relocate.append(rel_path_name)
if relocate.needs_text_relocation(m_type, m_subtype):
rel_path_name = os.path.relpath(path_name, prefix)
text_to_relocate.append(rel_path_name)
# Create buildinfo data and write it to disk
buildinfo = {}
buildinfo['relative_rpaths'] = rel
buildinfo['buildpath'] = spack.store.layout.root
buildinfo['spackprefix'] = spack.paths.prefix
buildinfo['relative_prefix'] = os.path.relpath(
prefix, spack.store.layout.root)
buildinfo['relocate_textfiles'] = text_to_relocate
buildinfo['relocate_binaries'] = binary_to_relocate
buildinfo['relocate_links'] = link_to_relocate
buildinfo['prefix_to_hash'] = prefix_to_hash
filename = buildinfo_file_name(workdir)
with open(filename, 'w') as outfile:
outfile.write(syaml.dump(buildinfo, default_flow_style=True))
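# The buildinfo file written above is a flow-style YAML mapping; a minimal
# sketch of its shape (all paths and hashes below are hypothetical):
#
#     {relative_rpaths: false, buildpath: /opt/spack,
#      spackprefix: /home/user/spack,
#      relative_prefix: linux-ubuntu18.04-x86_64/gcc-9.3.0/zlib-1.2.11-abcdef,
#      relocate_textfiles: [bin/zlib-config],
#      relocate_binaries: [lib/libz.so.1.2.11],
#      relocate_links: [lib/libz.so],
#      prefix_to_hash: {/opt/spack/linux-ubuntu18.04-x86_64/gcc-9.3.0/zlib-1.2.11-abcdef: abcdef}}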
def tarball_directory_name(spec):
"""
Return name of the tarball directory according to the convention
<os>-<architecture>/<compiler>/<package>-<version>/
"""
return "%s/%s/%s-%s" % (spec.architecture,
str(spec.compiler).replace("@", "-"),
spec.name, spec.version)
def tarball_name(spec, ext):
"""
Return the name of the tarfile according to the convention
<os>-<architecture>-<package>-<dag_hash><ext>
"""
return "%s-%s-%s-%s-%s%s" % (spec.architecture,
str(spec.compiler).replace("@", "-"),
spec.name,
spec.version,
spec.dag_hash(),
ext)
def tarball_path_name(spec, ext):
"""
Return the full path+name for a given spec according to the convention
<tarball_directory_name>/<tarball_name>
"""
return os.path.join(tarball_directory_name(spec),
tarball_name(spec, ext))
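# Worked example of the two conventions above (values are illustrative): for
# a zlib@1.2.11 spec compiled with gcc@9.3.0 on linux-ubuntu18.04-x86_64,
#
#     tarball_directory_name(spec)
#         -> 'linux-ubuntu18.04-x86_64/gcc-9.3.0/zlib-1.2.11'
#     tarball_name(spec, '.spack')
#         -> 'linux-ubuntu18.04-x86_64-gcc-9.3.0-zlib-1.2.11-<dag_hash>.spack'
#
# and tarball_path_name() simply joins the two with os.path.join.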
def checksum_tarball(file):
# calculate sha256 hash of tar file
block_size = 65536
hasher = hashlib.sha256()
with open(file, 'rb') as tfile:
buf = tfile.read(block_size)
while len(buf) > 0:
hasher.update(buf)
buf = tfile.read(block_size)
return hasher.hexdigest()
def sign_tarball(key, force, specfile_path):
# Sign the packages if keys available
if spack.util.gpg.Gpg.gpg() is None:
raise NoGpgException(
"gpg2 is not available in $PATH .\n"
"Use spack install gnupg and spack load gnupg.")
if key is None:
keys = Gpg.signing_keys()
if len(keys) == 1:
key = keys[0]
if len(keys) > 1:
raise PickKeyException(str(keys))
if len(keys) == 0:
msg = "No default key available for signing.\n"
msg += "Use spack gpg init and spack gpg create"
msg += " to create a default key."
raise NoKeyException(msg)
if os.path.exists('%s.asc' % specfile_path):
if force:
os.remove('%s.asc' % specfile_path)
else:
raise NoOverwriteException('%s.asc' % specfile_path)
Gpg.sign(key, specfile_path, '%s.asc' % specfile_path)
def generate_package_index(cache_prefix):
"""Create the build cache index page.
Creates (or replaces) the "index.html" page at the location given in
cache_prefix. This page contains a link for each binary package (*.yaml)
and public key (*.key) under cache_prefix.
"""
tmpdir = tempfile.mkdtemp()
try:
index_html_path = os.path.join(tmpdir, 'index.html')
file_list = (
entry
for entry in web_util.list_url(cache_prefix)
if (entry.endswith('.yaml')
or entry.endswith('.key')))
with open(index_html_path, 'w') as f:
f.write(BUILD_CACHE_INDEX_TEMPLATE.format(
title='Spack Package Index',
path_list='\n'.join(
BUILD_CACHE_INDEX_ENTRY_TEMPLATE.format(path=path)
for path in file_list)))
web_util.push_to_url(
index_html_path,
url_util.join(cache_prefix, 'index.html'),
keep_original=False,
extra_args={'ContentType': 'text/html'})
finally:
shutil.rmtree(tmpdir)
def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
allow_root=False, key=None, regenerate_index=False):
"""
Build a tarball from given spec and put it into the directory structure
used at the mirror (following <tarball_directory_name>).
"""
if not spec.concrete:
raise ValueError('spec must be concrete to build tarball')
# set up some paths
tmpdir = tempfile.mkdtemp()
cache_prefix = build_cache_prefix(tmpdir)
tarfile_name = tarball_name(spec, '.tar.gz')
tarfile_dir = os.path.join(cache_prefix, tarball_directory_name(spec))
tarfile_path = os.path.join(tarfile_dir, tarfile_name)
spackfile_path = os.path.join(
cache_prefix, tarball_path_name(spec, '.spack'))
remote_spackfile_path = url_util.join(
outdir, os.path.relpath(spackfile_path, tmpdir))
mkdirp(tarfile_dir)
if web_util.url_exists(remote_spackfile_path):
if force:
web_util.remove_url(remote_spackfile_path)
else:
raise NoOverwriteException(url_util.format(remote_spackfile_path))
# need to copy the spec file so the build cache can be downloaded
# without concretizing with the current spack packages
# and preferences
spec_file = os.path.join(spec.prefix, ".spack", "spec.yaml")
specfile_name = tarball_name(spec, '.spec.yaml')
specfile_path = os.path.realpath(
os.path.join(cache_prefix, specfile_name))
remote_specfile_path = url_util.join(
outdir, os.path.relpath(specfile_path, os.path.realpath(tmpdir)))
if web_util.url_exists(remote_specfile_path):
if force:
web_util.remove_url(remote_specfile_path)
else:
raise NoOverwriteException(url_util.format(remote_specfile_path))
# make a copy of the install directory to work with
workdir = os.path.join(tmpdir, os.path.basename(spec.prefix))
# install_tree copies hardlinks
    # create a temporary tarfile from prefix and extract it to workdir
# tarfile preserves hardlinks
temp_tarfile_name = tarball_name(spec, '.tar')
temp_tarfile_path = os.path.join(tarfile_dir, temp_tarfile_name)
with closing(tarfile.open(temp_tarfile_path, 'w')) as tar:
tar.add(name='%s' % spec.prefix,
arcname='.')
with closing(tarfile.open(temp_tarfile_path, 'r')) as tar:
tar.extractall(workdir)
os.remove(temp_tarfile_path)
# create info for later relocation and create tar
write_buildinfo_file(spec, workdir, rel)
# optionally make the paths in the binaries relative to each other
# in the spack install tree before creating tarball
if rel:
try:
make_package_relative(workdir, spec, allow_root)
except Exception as e:
shutil.rmtree(workdir)
shutil.rmtree(tarfile_dir)
shutil.rmtree(tmpdir)
tty.die(e)
else:
try:
check_package_relocatable(workdir, spec, allow_root)
except Exception as e:
shutil.rmtree(workdir)
shutil.rmtree(tarfile_dir)
shutil.rmtree(tmpdir)
tty.die(e)
# create gzip compressed tarball of the install prefix
with closing(tarfile.open(tarfile_path, 'w:gz')) as tar:
tar.add(name='%s' % workdir,
arcname='%s' % os.path.basename(spec.prefix))
# remove copy of install directory
shutil.rmtree(workdir)
# get the sha256 checksum of the tarball
checksum = checksum_tarball(tarfile_path)
# add sha256 checksum to spec.yaml
with open(spec_file, 'r') as inputfile:
content = inputfile.read()
spec_dict = yaml.load(content)
bchecksum = {}
bchecksum['hash_algorithm'] = 'sha256'
bchecksum['hash'] = checksum
spec_dict['binary_cache_checksum'] = bchecksum
# Add original install prefix relative to layout root to spec.yaml.
    # This will be used to determine if the directory layout has changed.
buildinfo = {}
buildinfo['relative_prefix'] = os.path.relpath(
spec.prefix, spack.store.layout.root)
buildinfo['relative_rpaths'] = rel
spec_dict['buildinfo'] = buildinfo
spec_dict['full_hash'] = spec.full_hash()
tty.debug('The full_hash ({0}) of {1} will be written into {2}'.format(
spec_dict['full_hash'],
spec.name,
url_util.format(remote_specfile_path)))
tty.debug(spec.tree())
with open(specfile_path, 'w') as outfile:
outfile.write(syaml.dump(spec_dict))
# sign the tarball and spec file with gpg
if not unsigned:
sign_tarball(key, force, specfile_path)
# put tarball, spec and signature files in .spack archive
with closing(tarfile.open(spackfile_path, 'w')) as tar:
tar.add(name=tarfile_path, arcname='%s' % tarfile_name)
tar.add(name=specfile_path, arcname='%s' % specfile_name)
if not unsigned:
tar.add(name='%s.asc' % specfile_path,
arcname='%s.asc' % specfile_name)
# cleanup file moved to archive
os.remove(tarfile_path)
if not unsigned:
os.remove('%s.asc' % specfile_path)
web_util.push_to_url(
spackfile_path, remote_spackfile_path, keep_original=False)
web_util.push_to_url(
specfile_path, remote_specfile_path, keep_original=False)
    tty.msg('Buildcache for "%s" written to \n %s' %
(spec, remote_spackfile_path))
try:
# create an index.html for the build_cache directory so specs can be
# found
if regenerate_index:
generate_package_index(url_util.join(
outdir, os.path.relpath(cache_prefix, tmpdir)))
finally:
shutil.rmtree(tmpdir)
return None
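# Illustrative sketch (the mirror URL below is hypothetical): push an
# unsigned build cache entry for an installed, concrete spec and refresh
# the index.
def _example_push_to_mirror(spec, mirror_url='file:///tmp/mirror'):
    build_tarball(spec, mirror_url, force=True, unsigned=True,
                  regenerate_index=True)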
def download_tarball(spec):
"""
Download binary tarball for given package into stage area
Return True if successful
"""
if not spack.mirror.MirrorCollection():
tty.die("Please add a spack mirror to allow " +
"download of pre-compiled packages.")
tarball = tarball_path_name(spec, '.spack')
for mirror in spack.mirror.MirrorCollection().values():
url = url_util.join(
mirror.fetch_url, _build_cache_relative_path, tarball)
# stage the tarball into standard place
stage = Stage(url, name="build_cache", keep=True)
try:
stage.fetch()
return stage.save_filename
except fs.FetchError:
continue
return None
def make_package_relative(workdir, spec, allow_root):
"""
Change paths in binaries to relative paths. Change absolute symlinks
to relative symlinks.
"""
prefix = spec.prefix
buildinfo = read_buildinfo_file(workdir)
old_layout_root = buildinfo['buildpath']
orig_path_names = list()
cur_path_names = list()
for filename in buildinfo['relocate_binaries']:
orig_path_names.append(os.path.join(prefix, filename))
cur_path_names.append(os.path.join(workdir, filename))
if (spec.architecture.platform == 'darwin' or
spec.architecture.platform == 'test' and
platform.system().lower() == 'darwin'):
relocate.make_macho_binaries_relative(cur_path_names, orig_path_names,
old_layout_root)
if (spec.architecture.platform == 'linux' or
spec.architecture.platform == 'test' and
platform.system().lower() == 'linux'):
relocate.make_elf_binaries_relative(cur_path_names, orig_path_names,
old_layout_root)
relocate.check_files_relocatable(cur_path_names, allow_root)
orig_path_names = list()
cur_path_names = list()
for linkname in buildinfo.get('relocate_links', []):
orig_path_names.append(os.path.join(prefix, linkname))
cur_path_names.append(os.path.join(workdir, linkname))
relocate.make_link_relative(cur_path_names, orig_path_names)
def check_package_relocatable(workdir, spec, allow_root):
"""
Check if package binaries are relocatable.
Change links to placeholder links.
"""
buildinfo = read_buildinfo_file(workdir)
cur_path_names = list()
for filename in buildinfo['relocate_binaries']:
cur_path_names.append(os.path.join(workdir, filename))
relocate.check_files_relocatable(cur_path_names, allow_root)
def relocate_package(spec, allow_root):
"""
Relocate the given package
"""
workdir = str(spec.prefix)
buildinfo = read_buildinfo_file(workdir)
new_layout_root = str(spack.store.layout.root)
new_prefix = str(spec.prefix)
new_rel_prefix = str(os.path.relpath(new_prefix, new_layout_root))
new_spack_prefix = str(spack.paths.prefix)
old_layout_root = str(buildinfo['buildpath'])
old_spack_prefix = str(buildinfo.get('spackprefix'))
old_rel_prefix = buildinfo.get('relative_prefix')
old_prefix = os.path.join(old_layout_root, old_rel_prefix)
rel = buildinfo.get('relative_rpaths')
prefix_to_hash = buildinfo.get('prefix_to_hash', None)
if (old_rel_prefix != new_rel_prefix and not prefix_to_hash):
msg = "Package tarball was created from an install "
msg += "prefix with a different directory layout and an older "
msg += "buildcache create implementation. It cannot be relocated."
raise NewLayoutException(msg)
# older buildcaches do not have the prefix_to_hash dictionary
# need to set an empty dictionary and add one entry to
# prefix_to_prefix to reproduce the old behavior
if not prefix_to_hash:
prefix_to_hash = dict()
hash_to_prefix = dict()
hash_to_prefix[spec.format('{hash}')] = str(spec.package.prefix)
new_deps = spack.build_environment.get_rpath_deps(spec.package)
for d in new_deps:
hash_to_prefix[d.format('{hash}')] = str(d.prefix)
prefix_to_prefix = dict()
for orig_prefix, hash in prefix_to_hash.items():
prefix_to_prefix[orig_prefix] = hash_to_prefix.get(hash, None)
prefix_to_prefix[old_prefix] = new_prefix
prefix_to_prefix[old_layout_root] = new_layout_root
tty.debug("Relocating package from",
"%s to %s." % (old_layout_root, new_layout_root))
def is_backup_file(file):
return file.endswith('~')
# Text files containing the prefix text
text_names = list()
for filename in buildinfo['relocate_textfiles']:
text_name = os.path.join(workdir, filename)
# Don't add backup files generated by filter_file during install step.
if not is_backup_file(text_name):
text_names.append(text_name)
# If we are installing back to the same location don't replace anything
if old_layout_root != new_layout_root:
paths_to_relocate = [old_spack_prefix, old_layout_root]
paths_to_relocate.extend(prefix_to_hash.keys())
files_to_relocate = list(filter(
lambda pathname: not relocate.file_is_relocatable(
pathname, paths_to_relocate=paths_to_relocate),
map(lambda filename: os.path.join(workdir, filename),
buildinfo['relocate_binaries'])))
# If the buildcache was not created with relativized rpaths
# do the relocation of path in binaries
if (spec.architecture.platform == 'darwin' or
spec.architecture.platform == 'test' and
platform.system().lower() == 'darwin'):
relocate.relocate_macho_binaries(files_to_relocate,
old_layout_root,
new_layout_root,
prefix_to_prefix, rel,
old_prefix,
new_prefix)
if (spec.architecture.platform == 'linux' or
spec.architecture.platform == 'test' and
platform.system().lower() == 'linux'):
relocate.relocate_elf_binaries(files_to_relocate,
old_layout_root,
new_layout_root,
prefix_to_prefix, rel,
old_prefix,
new_prefix)
# Relocate links to the new install prefix
link_names = [linkname
for linkname in buildinfo.get('relocate_links', [])]
relocate.relocate_links(link_names,
old_layout_root,
new_layout_root,
old_prefix,
new_prefix,
prefix_to_prefix)
# For all buildcaches
# relocate the install prefixes in text files including dependencies
relocate.relocate_text(text_names,
old_layout_root, new_layout_root,
old_prefix, new_prefix,
old_spack_prefix,
new_spack_prefix,
prefix_to_prefix)
# relocate the install prefixes in binary files including dependencies
relocate.relocate_text_bin(files_to_relocate,
old_layout_root, new_layout_root,
old_prefix, new_prefix,
old_spack_prefix,
new_spack_prefix,
prefix_to_prefix)
def extract_tarball(spec, filename, allow_root=False, unsigned=False,
force=False):
"""
extract binary tarball for given package into install area
"""
if os.path.exists(spec.prefix):
if force:
shutil.rmtree(spec.prefix)
else:
raise NoOverwriteException(str(spec.prefix))
tmpdir = tempfile.mkdtemp()
stagepath = os.path.dirname(filename)
spackfile_name = tarball_name(spec, '.spack')
spackfile_path = os.path.join(stagepath, spackfile_name)
tarfile_name = tarball_name(spec, '.tar.gz')
tarfile_path = os.path.join(tmpdir, tarfile_name)
specfile_name = tarball_name(spec, '.spec.yaml')
specfile_path = os.path.join(tmpdir, specfile_name)
with closing(tarfile.open(spackfile_path, 'r')) as tar:
tar.extractall(tmpdir)
# some buildcache tarfiles use bzip2 compression
if not os.path.exists(tarfile_path):
tarfile_name = tarball_name(spec, '.tar.bz2')
tarfile_path = os.path.join(tmpdir, tarfile_name)
if not unsigned:
if os.path.exists('%s.asc' % specfile_path):
try:
suppress = config.get('config:suppress_gpg_warnings', False)
Gpg.verify('%s.asc' % specfile_path, specfile_path, suppress)
except Exception as e:
shutil.rmtree(tmpdir)
raise e
else:
shutil.rmtree(tmpdir)
raise NoVerifyException(
"Package spec file failed signature verification.\n"
"Use spack buildcache keys to download "
"and install a key for verification from the mirror.")
# get the sha256 checksum of the tarball
checksum = checksum_tarball(tarfile_path)
# get the sha256 checksum recorded at creation
spec_dict = {}
with open(specfile_path, 'r') as inputfile:
content = inputfile.read()
spec_dict = syaml.load(content)
bchecksum = spec_dict['binary_cache_checksum']
# if the checksums don't match don't install
if bchecksum['hash'] != checksum:
shutil.rmtree(tmpdir)
raise NoChecksumException(
"Package tarball failed checksum verification.\n"
"It cannot be installed.")
new_relative_prefix = str(os.path.relpath(spec.prefix,
spack.store.layout.root))
# if the original relative prefix is in the spec file use it
buildinfo = spec_dict.get('buildinfo', {})
old_relative_prefix = buildinfo.get('relative_prefix', new_relative_prefix)
rel = buildinfo.get('relative_rpaths')
# if the original relative prefix and new relative prefix differ the
# directory layout has changed and the buildcache cannot be installed
# if it was created with relative rpaths
info = 'old relative prefix %s\nnew relative prefix %s\nrelative rpaths %s'
tty.debug(info %
(old_relative_prefix, new_relative_prefix, rel))
# if (old_relative_prefix != new_relative_prefix and (rel)):
# shutil.rmtree(tmpdir)
# msg = "Package tarball was created from an install "
# msg += "prefix with a different directory layout. "
# msg += "It cannot be relocated because it "
# msg += "uses relative rpaths."
# raise NewLayoutException(msg)
# extract the tarball in a temp directory
with closing(tarfile.open(tarfile_path, 'r')) as tar:
tar.extractall(path=tmpdir)
# get the parent directory of the file .spack/binary_distribution
    # this should be the directory unpacked from the tarball whose
# name is unknown because the prefix naming is unknown
bindist_file = glob.glob('%s/*/.spack/binary_distribution' % tmpdir)[0]
workdir = re.sub('/.spack/binary_distribution$', '', bindist_file)
tty.debug('workdir %s' % workdir)
# install_tree copies hardlinks
    # create a temporary tarfile from prefix and extract it to workdir
# tarfile preserves hardlinks
temp_tarfile_name = tarball_name(spec, '.tar')
temp_tarfile_path = os.path.join(tmpdir, temp_tarfile_name)
with closing(tarfile.open(temp_tarfile_path, 'w')) as tar:
tar.add(name='%s' % workdir,
arcname='.')
with closing(tarfile.open(temp_tarfile_path, 'r')) as tar:
tar.extractall(spec.prefix)
os.remove(temp_tarfile_path)
# cleanup
os.remove(tarfile_path)
os.remove(specfile_path)
try:
relocate_package(spec, allow_root)
except Exception as e:
shutil.rmtree(spec.prefix)
raise e
else:
manifest_file = os.path.join(spec.prefix,
spack.store.layout.metadata_dir,
spack.store.layout.manifest_file_name)
if not os.path.exists(manifest_file):
spec_id = spec.format('{name}/{hash:7}')
tty.warn('No manifest file in tarball for spec %s' % spec_id)
finally:
shutil.rmtree(tmpdir)
if os.path.exists(filename):
os.remove(filename)
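# Illustrative sketch of the install-from-cache flow built from the two
# functions above; assumes ``spec`` is concrete and not yet installed.
def _example_install_from_cache(spec):
    tarball = download_tarball(spec)  # stages the .spack archive locally
    if tarball:
        # unsigned=False keeps signature verification enabled
        extract_tarball(spec, tarball, allow_root=False, unsigned=False)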
# Internal cache for downloaded specs
_cached_specs = set()
def try_download_specs(urls=None, force=False):
    """
    Try to download the urls and cache them
    """
global _cached_specs
if urls is None:
return {}
for link in urls:
with Stage(link, name="build_cache", keep=True) as stage:
if force and os.path.exists(stage.save_filename):
os.remove(stage.save_filename)
if not os.path.exists(stage.save_filename):
try:
stage.fetch()
except fs.FetchError:
continue
with open(stage.save_filename, 'r') as f:
# read the spec from the build cache file. All specs
# in build caches are concrete (as they are built) so
# we need to mark this spec concrete on read-in.
spec = Spec.from_yaml(f)
spec._mark_concrete()
_cached_specs.add(spec)
return _cached_specs
def get_spec(spec=None, force=False):
"""
Check if spec.yaml exists on mirrors and return it if it does
"""
global _cached_specs
urls = set()
if spec is None:
return {}
specfile_name = tarball_name(spec, '.spec.yaml')
if not spack.mirror.MirrorCollection():
tty.debug("No Spack mirrors are currently configured")
return {}
if _cached_specs and spec in _cached_specs:
return _cached_specs
for mirror in spack.mirror.MirrorCollection().values():
fetch_url_build_cache = url_util.join(
mirror.fetch_url, _build_cache_relative_path)
mirror_dir = url_util.local_file_path(fetch_url_build_cache)
if mirror_dir:
tty.msg("Finding buildcaches in %s" % mirror_dir)
link = url_util.join(fetch_url_build_cache, specfile_name)
urls.add(link)
else:
tty.msg("Finding buildcaches at %s" %
url_util.format(fetch_url_build_cache))
link = url_util.join(fetch_url_build_cache, specfile_name)
urls.add(link)
return try_download_specs(urls=urls, force=force)
def get_specs(force=False, allarch=False):
"""
Get spec.yaml's for build caches available on mirror
"""
arch = architecture.Arch(architecture.platform(),
'default_os', 'default_target')
arch_pattern = ('([^-]*-[^-]*-[^-]*)')
if not allarch:
arch_pattern = '(%s-%s-[^-]*)' % (arch.platform, arch.os)
regex_pattern = '%s(.*)(spec.yaml$)' % (arch_pattern)
arch_re = re.compile(regex_pattern)
if not spack.mirror.MirrorCollection():
tty.debug("No Spack mirrors are currently configured")
return {}
urls = set()
for mirror in spack.mirror.MirrorCollection().values():
fetch_url_build_cache = url_util.join(
mirror.fetch_url, _build_cache_relative_path)
mirror_dir = url_util.local_file_path(fetch_url_build_cache)
if mirror_dir:
tty.msg("Finding buildcaches in %s" % mirror_dir)
if os.path.exists(mirror_dir):
files = os.listdir(mirror_dir)
for file in files:
m = arch_re.search(file)
if m:
link = url_util.join(fetch_url_build_cache, file)
urls.add(link)
else:
tty.msg("Finding buildcaches at %s" %
url_util.format(fetch_url_build_cache))
p, links = web_util.spider(
url_util.join(fetch_url_build_cache, 'index.html'))
for link in links:
m = arch_re.search(link)
if m:
urls.add(link)
return try_download_specs(urls=urls, force=force)
def get_keys(install=False, trust=False, force=False):
"""
Get pgp public keys available on mirror
with suffix .key or .pub
"""
if not spack.mirror.MirrorCollection():
tty.die("Please add a spack mirror to allow " +
"download of build caches.")
keys = set()
for mirror in spack.mirror.MirrorCollection().values():
fetch_url_build_cache = url_util.join(
mirror.fetch_url, _build_cache_relative_path)
mirror_dir = url_util.local_file_path(fetch_url_build_cache)
if mirror_dir:
tty.msg("Finding public keys in %s" % mirror_dir)
files = os.listdir(str(mirror_dir))
for file in files:
if re.search(r'\.key', file) or re.search(r'\.pub', file):
link = url_util.join(fetch_url_build_cache, file)
keys.add(link)
else:
tty.msg("Finding public keys at %s" %
url_util.format(fetch_url_build_cache))
# For s3 mirror need to request index.html directly
p, links = web_util.spider(
url_util.join(fetch_url_build_cache, 'index.html'), depth=1)
for link in links:
if re.search(r'\.key', link) or re.search(r'\.pub', link):
keys.add(link)
for link in keys:
with Stage(link, name="build_cache", keep=True) as stage:
if os.path.exists(stage.save_filename) and force:
os.remove(stage.save_filename)
if not os.path.exists(stage.save_filename):
try:
stage.fetch()
except fs.FetchError:
continue
tty.msg('Found key %s' % link)
if install:
if trust:
Gpg.trust(stage.save_filename)
tty.msg('Added this key to trusted keys.')
else:
tty.msg('Will not add this key to trusted keys.'
                        ' Use -t to install all downloaded keys')
def needs_rebuild(spec, mirror_url, rebuild_on_errors=False):
if not spec.concrete:
raise ValueError('spec must be concrete to check against mirror')
pkg_name = spec.name
pkg_version = spec.version
pkg_hash = spec.dag_hash()
pkg_full_hash = spec.full_hash()
tty.debug('Checking {0}-{1}, dag_hash = {2}, full_hash = {3}'.format(
pkg_name, pkg_version, pkg_hash, pkg_full_hash))
tty.debug(spec.tree())
# Try to retrieve the .spec.yaml directly, based on the known
# format of the name, in order to determine if the package
# needs to be rebuilt.
cache_prefix = build_cache_prefix(mirror_url)
spec_yaml_file_name = tarball_name(spec, '.spec.yaml')
file_path = os.path.join(cache_prefix, spec_yaml_file_name)
result_of_error = 'Package ({0}) will {1}be rebuilt'.format(
spec.short_spec, '' if rebuild_on_errors else 'not ')
try:
_, _, yaml_file = web_util.read_from_url(file_path)
yaml_contents = codecs.getreader('utf-8')(yaml_file).read()
except (URLError, web_util.SpackWebError) as url_err:
err_msg = [
'Unable to determine whether {0} needs rebuilding,',
' caught exception attempting to read from {1}.',
]
tty.error(''.join(err_msg).format(spec.short_spec, file_path))
tty.debug(url_err)
tty.warn(result_of_error)
return rebuild_on_errors
if not yaml_contents:
tty.error('Reading {0} returned nothing'.format(file_path))
tty.warn(result_of_error)
return rebuild_on_errors
spec_yaml = syaml.load(yaml_contents)
# If either the full_hash didn't exist in the .spec.yaml file, or it
# did, but didn't match the one we computed locally, then we should
# just rebuild. This can be simplified once the dag_hash and the
# full_hash become the same thing.
if ('full_hash' not in spec_yaml or
spec_yaml['full_hash'] != pkg_full_hash):
if 'full_hash' in spec_yaml:
reason = 'hash mismatch, remote = {0}, local = {1}'.format(
spec_yaml['full_hash'], pkg_full_hash)
else:
reason = 'full_hash was missing from remote spec.yaml'
tty.msg('Rebuilding {0}, reason: {1}'.format(
spec.short_spec, reason))
tty.msg(spec.tree())
return True
return False
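# Example (sketch; the mirror URL is hypothetical):
#
#     needs_rebuild(spec, 'https://mirror.example.com', rebuild_on_errors=True)
#
# returns True when the mirror lacks a spec.yaml for the spec, when reading
# it fails (because rebuild_on_errors is set), or when the recorded
# full_hash differs from the locally computed one.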
# MASKED: check_specs_against_mirrors function (lines 994-1037)
def _download_buildcache_entry(mirror_root, descriptions):
for description in descriptions:
description_url = os.path.join(mirror_root, description['url'])
path = description['path']
fail_if_missing = description['required']
mkdirp(path)
stage = Stage(
description_url, name="build_cache", path=path, keep=True)
try:
stage.fetch()
except fs.FetchError as e:
tty.debug(e)
if fail_if_missing:
tty.error('Failed to download required url {0}'.format(
description_url))
return False
return True
def download_buildcache_entry(file_descriptions, mirror_url=None):
if not mirror_url and not spack.mirror.MirrorCollection():
tty.die("Please provide or add a spack mirror to allow " +
"download of buildcache entries.")
if mirror_url:
mirror_root = os.path.join(
mirror_url, _build_cache_relative_path)
return _download_buildcache_entry(mirror_root, file_descriptions)
for mirror in spack.mirror.MirrorCollection().values():
mirror_root = os.path.join(
mirror.fetch_url,
_build_cache_relative_path)
if _download_buildcache_entry(mirror_root, file_descriptions):
return True
else:
continue
return False
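# Each file description above is a dict with 'url' (path relative to the
# mirror's build_cache directory), 'path' (local destination directory) and
# 'required' (fail if missing) keys.  Sketch (mirror URL is hypothetical):
#
#     download_buildcache_entry([
#         {'url': tarball_path_name(spec, '.spack'),
#          'path': '/tmp/downloads',
#          'required': True},
#     ], mirror_url='https://mirror.example.com')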
|
def check_specs_against_mirrors(mirrors, specs, output_file=None,
rebuild_on_errors=False):
"""Check all the given specs against buildcaches on the given mirrors and
determine if any of the specs need to be rebuilt. Reasons for needing to
rebuild include binary cache for spec isn't present on a mirror, or it is
present but the full_hash has changed since last time spec was built.
Arguments:
mirrors (dict): Mirrors to check against
specs (iterable): Specs to check against mirrors
output_file (string): Path to output file to be written. If provided,
mirrors with missing or out-of-date specs will be formatted as a
JSON object and written to this file.
rebuild_on_errors (boolean): Treat any errors encountered while
checking specs as a signal to rebuild package.
Returns: 1 if any spec was out-of-date on any mirror, 0 otherwise.
"""
rebuilds = {}
for mirror in spack.mirror.MirrorCollection(mirrors).values():
tty.msg('Checking for built specs at %s' % mirror.fetch_url)
rebuild_list = []
for spec in specs:
if needs_rebuild(spec, mirror.fetch_url, rebuild_on_errors):
rebuild_list.append({
'short_spec': spec.short_spec,
'hash': spec.dag_hash()
})
if rebuild_list:
rebuilds[mirror.fetch_url] = {
'mirrorName': mirror.name,
'mirrorUrl': mirror.fetch_url,
'rebuildSpecs': rebuild_list
}
if output_file:
with open(output_file, 'w') as outf:
outf.write(json.dumps(rebuilds))
return 1 if rebuilds else 0
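# Usage sketch (mirror name and URL are hypothetical): check two concrete
# specs against one mirror and record any needed rebuilds as JSON.
#
#     mirrors = {'remote': 'https://mirror.example.com'}
#     ret = check_specs_against_mirrors(mirrors, [spec_a, spec_b],
#                                       output_file='rebuilds.json')
#     # ret == 1 when at least one spec needs rebuilding on some mirror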
| 994
| 1037
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import codecs
import os
import re
import tarfile
import shutil
import tempfile
import hashlib
import glob
import platform
from contextlib import closing
import ruamel.yaml as yaml
import json
from six.moves.urllib.error import URLError
import llnl.util.tty as tty
from llnl.util.filesystem import mkdirp
import spack.cmd
import spack.config as config
import spack.fetch_strategy as fs
import spack.util.gpg
import spack.relocate as relocate
import spack.util.spack_yaml as syaml
import spack.mirror
import spack.util.url as url_util
import spack.util.web as web_util
from spack.spec import Spec
from spack.stage import Stage
from spack.util.gpg import Gpg
import spack.architecture as architecture
import spack.build_environment
import spack.error
import spack.paths
import spack.store
_build_cache_relative_path = 'build_cache'
BUILD_CACHE_INDEX_TEMPLATE = '''
<html>
<head>
<title>{title}</title>
</head>
<body>
<ul>
{path_list}
</ul>
</body>
</html>
'''
BUILD_CACHE_INDEX_ENTRY_TEMPLATE = ' <li><a href="{path}">{path}</a></li>'
class NoOverwriteException(spack.error.SpackError):
"""
Raised when a file exists and must be overwritten.
"""
def __init__(self, file_path):
err_msg = "\n%s\nexists\n" % file_path
err_msg += "Use -f option to overwrite."
super(NoOverwriteException, self).__init__(err_msg)
class NoGpgException(spack.error.SpackError):
"""
Raised when gpg2 is not in PATH
"""
def __init__(self, msg):
super(NoGpgException, self).__init__(msg)
class NoKeyException(spack.error.SpackError):
"""
Raised when gpg has no default key added.
"""
def __init__(self, msg):
super(NoKeyException, self).__init__(msg)
class PickKeyException(spack.error.SpackError):
"""
Raised when multiple keys can be used to sign.
"""
def __init__(self, keys):
err_msg = "Multiple keys available for signing\n%s\n" % keys
err_msg += "Use spack buildcache create -k <key hash> to pick a key."
super(PickKeyException, self).__init__(err_msg)
class NoVerifyException(spack.error.SpackError):
"""
Raised if file fails signature verification.
"""
pass
class NoChecksumException(spack.error.SpackError):
"""
Raised if file fails checksum verification.
"""
pass
class NewLayoutException(spack.error.SpackError):
"""
Raised if directory layout is different from buildcache.
"""
def __init__(self, msg):
super(NewLayoutException, self).__init__(msg)
def build_cache_relative_path():
return _build_cache_relative_path
def build_cache_prefix(prefix):
return os.path.join(prefix, build_cache_relative_path())
def buildinfo_file_name(prefix):
"""
Filename of the binary package meta-data file
"""
name = os.path.join(prefix, ".spack/binary_distribution")
return name
def read_buildinfo_file(prefix):
"""
Read buildinfo file
"""
filename = buildinfo_file_name(prefix)
with open(filename, 'r') as inputfile:
content = inputfile.read()
buildinfo = yaml.load(content)
return buildinfo
def write_buildinfo_file(spec, workdir, rel=False):
"""
Create a cache file containing information
required for the relocation
"""
prefix = spec.prefix
text_to_relocate = []
binary_to_relocate = []
link_to_relocate = []
blacklist = (".spack", "man")
prefix_to_hash = dict()
prefix_to_hash[str(spec.package.prefix)] = spec.dag_hash()
deps = spack.build_environment.get_rpath_deps(spec.package)
for d in deps:
prefix_to_hash[str(d.prefix)] = d.dag_hash()
    # Do this during tarball creation to save time when the tarball is unpacked.
# Used by make_package_relative to determine binaries to change.
for root, dirs, files in os.walk(prefix, topdown=True):
dirs[:] = [d for d in dirs if d not in blacklist]
for filename in files:
path_name = os.path.join(root, filename)
m_type, m_subtype = relocate.mime_type(path_name)
if os.path.islink(path_name):
link = os.readlink(path_name)
if os.path.isabs(link):
# Relocate absolute links into the spack tree
if link.startswith(spack.store.layout.root):
rel_path_name = os.path.relpath(path_name, prefix)
link_to_relocate.append(rel_path_name)
else:
msg = 'Absolute link %s to %s ' % (path_name, link)
msg += 'outside of prefix %s ' % prefix
msg += 'should not be relocated.'
tty.warn(msg)
if relocate.needs_binary_relocation(m_type, m_subtype):
if not filename.endswith('.o'):
rel_path_name = os.path.relpath(path_name, prefix)
binary_to_relocate.append(rel_path_name)
if relocate.needs_text_relocation(m_type, m_subtype):
rel_path_name = os.path.relpath(path_name, prefix)
text_to_relocate.append(rel_path_name)
# Create buildinfo data and write it to disk
buildinfo = {}
buildinfo['relative_rpaths'] = rel
buildinfo['buildpath'] = spack.store.layout.root
buildinfo['spackprefix'] = spack.paths.prefix
buildinfo['relative_prefix'] = os.path.relpath(
prefix, spack.store.layout.root)
buildinfo['relocate_textfiles'] = text_to_relocate
buildinfo['relocate_binaries'] = binary_to_relocate
buildinfo['relocate_links'] = link_to_relocate
buildinfo['prefix_to_hash'] = prefix_to_hash
filename = buildinfo_file_name(workdir)
with open(filename, 'w') as outfile:
outfile.write(syaml.dump(buildinfo, default_flow_style=True))
def tarball_directory_name(spec):
"""
Return name of the tarball directory according to the convention
<os>-<architecture>/<compiler>/<package>-<version>/
"""
return "%s/%s/%s-%s" % (spec.architecture,
str(spec.compiler).replace("@", "-"),
spec.name, spec.version)
def tarball_name(spec, ext):
"""
Return the name of the tarfile according to the convention
<os>-<architecture>-<package>-<dag_hash><ext>
"""
return "%s-%s-%s-%s-%s%s" % (spec.architecture,
str(spec.compiler).replace("@", "-"),
spec.name,
spec.version,
spec.dag_hash(),
ext)
def tarball_path_name(spec, ext):
"""
Return the full path+name for a given spec according to the convention
<tarball_directory_name>/<tarball_name>
"""
return os.path.join(tarball_directory_name(spec),
tarball_name(spec, ext))
def checksum_tarball(file):
# calculate sha256 hash of tar file
block_size = 65536
hasher = hashlib.sha256()
with open(file, 'rb') as tfile:
buf = tfile.read(block_size)
while len(buf) > 0:
hasher.update(buf)
buf = tfile.read(block_size)
return hasher.hexdigest()
def sign_tarball(key, force, specfile_path):
# Sign the packages if keys available
if spack.util.gpg.Gpg.gpg() is None:
raise NoGpgException(
"gpg2 is not available in $PATH .\n"
"Use spack install gnupg and spack load gnupg.")
if key is None:
keys = Gpg.signing_keys()
if len(keys) == 1:
key = keys[0]
if len(keys) > 1:
raise PickKeyException(str(keys))
if len(keys) == 0:
msg = "No default key available for signing.\n"
msg += "Use spack gpg init and spack gpg create"
msg += " to create a default key."
raise NoKeyException(msg)
if os.path.exists('%s.asc' % specfile_path):
if force:
os.remove('%s.asc' % specfile_path)
else:
raise NoOverwriteException('%s.asc' % specfile_path)
Gpg.sign(key, specfile_path, '%s.asc' % specfile_path)
def generate_package_index(cache_prefix):
"""Create the build cache index page.
Creates (or replaces) the "index.html" page at the location given in
cache_prefix. This page contains a link for each binary package (*.yaml)
and public key (*.key) under cache_prefix.
"""
tmpdir = tempfile.mkdtemp()
try:
index_html_path = os.path.join(tmpdir, 'index.html')
file_list = (
entry
for entry in web_util.list_url(cache_prefix)
if (entry.endswith('.yaml')
or entry.endswith('.key')))
with open(index_html_path, 'w') as f:
f.write(BUILD_CACHE_INDEX_TEMPLATE.format(
title='Spack Package Index',
path_list='\n'.join(
BUILD_CACHE_INDEX_ENTRY_TEMPLATE.format(path=path)
for path in file_list)))
web_util.push_to_url(
index_html_path,
url_util.join(cache_prefix, 'index.html'),
keep_original=False,
extra_args={'ContentType': 'text/html'})
finally:
shutil.rmtree(tmpdir)
def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
allow_root=False, key=None, regenerate_index=False):
"""
Build a tarball from given spec and put it into the directory structure
used at the mirror (following <tarball_directory_name>).
"""
if not spec.concrete:
raise ValueError('spec must be concrete to build tarball')
# set up some paths
tmpdir = tempfile.mkdtemp()
cache_prefix = build_cache_prefix(tmpdir)
tarfile_name = tarball_name(spec, '.tar.gz')
tarfile_dir = os.path.join(cache_prefix, tarball_directory_name(spec))
tarfile_path = os.path.join(tarfile_dir, tarfile_name)
spackfile_path = os.path.join(
cache_prefix, tarball_path_name(spec, '.spack'))
remote_spackfile_path = url_util.join(
outdir, os.path.relpath(spackfile_path, tmpdir))
mkdirp(tarfile_dir)
if web_util.url_exists(remote_spackfile_path):
if force:
web_util.remove_url(remote_spackfile_path)
else:
raise NoOverwriteException(url_util.format(remote_spackfile_path))
# need to copy the spec file so the build cache can be downloaded
# without concretizing with the current spack packages
# and preferences
spec_file = os.path.join(spec.prefix, ".spack", "spec.yaml")
specfile_name = tarball_name(spec, '.spec.yaml')
specfile_path = os.path.realpath(
os.path.join(cache_prefix, specfile_name))
remote_specfile_path = url_util.join(
outdir, os.path.relpath(specfile_path, os.path.realpath(tmpdir)))
if web_util.url_exists(remote_specfile_path):
if force:
web_util.remove_url(remote_specfile_path)
else:
raise NoOverwriteException(url_util.format(remote_specfile_path))
# make a copy of the install directory to work with
workdir = os.path.join(tmpdir, os.path.basename(spec.prefix))
# install_tree copies hardlinks
    # create a temporary tarfile from prefix and extract it to workdir
# tarfile preserves hardlinks
temp_tarfile_name = tarball_name(spec, '.tar')
temp_tarfile_path = os.path.join(tarfile_dir, temp_tarfile_name)
with closing(tarfile.open(temp_tarfile_path, 'w')) as tar:
tar.add(name='%s' % spec.prefix,
arcname='.')
with closing(tarfile.open(temp_tarfile_path, 'r')) as tar:
tar.extractall(workdir)
os.remove(temp_tarfile_path)
# create info for later relocation and create tar
write_buildinfo_file(spec, workdir, rel)
# optionally make the paths in the binaries relative to each other
# in the spack install tree before creating tarball
if rel:
try:
make_package_relative(workdir, spec, allow_root)
except Exception as e:
shutil.rmtree(workdir)
shutil.rmtree(tarfile_dir)
shutil.rmtree(tmpdir)
tty.die(e)
else:
try:
check_package_relocatable(workdir, spec, allow_root)
except Exception as e:
shutil.rmtree(workdir)
shutil.rmtree(tarfile_dir)
shutil.rmtree(tmpdir)
tty.die(e)
# create gzip compressed tarball of the install prefix
with closing(tarfile.open(tarfile_path, 'w:gz')) as tar:
tar.add(name='%s' % workdir,
arcname='%s' % os.path.basename(spec.prefix))
# remove copy of install directory
shutil.rmtree(workdir)
# get the sha256 checksum of the tarball
checksum = checksum_tarball(tarfile_path)
# add sha256 checksum to spec.yaml
with open(spec_file, 'r') as inputfile:
content = inputfile.read()
spec_dict = yaml.load(content)
bchecksum = {}
bchecksum['hash_algorithm'] = 'sha256'
bchecksum['hash'] = checksum
spec_dict['binary_cache_checksum'] = bchecksum
# Add original install prefix relative to layout root to spec.yaml.
    # This will be used to determine if the directory layout has changed.
buildinfo = {}
buildinfo['relative_prefix'] = os.path.relpath(
spec.prefix, spack.store.layout.root)
buildinfo['relative_rpaths'] = rel
spec_dict['buildinfo'] = buildinfo
spec_dict['full_hash'] = spec.full_hash()
tty.debug('The full_hash ({0}) of {1} will be written into {2}'.format(
spec_dict['full_hash'],
spec.name,
url_util.format(remote_specfile_path)))
tty.debug(spec.tree())
with open(specfile_path, 'w') as outfile:
outfile.write(syaml.dump(spec_dict))
# sign the tarball and spec file with gpg
if not unsigned:
sign_tarball(key, force, specfile_path)
# put tarball, spec and signature files in .spack archive
with closing(tarfile.open(spackfile_path, 'w')) as tar:
tar.add(name=tarfile_path, arcname='%s' % tarfile_name)
tar.add(name=specfile_path, arcname='%s' % specfile_name)
if not unsigned:
tar.add(name='%s.asc' % specfile_path,
arcname='%s.asc' % specfile_name)
# cleanup file moved to archive
os.remove(tarfile_path)
if not unsigned:
os.remove('%s.asc' % specfile_path)
web_util.push_to_url(
spackfile_path, remote_spackfile_path, keep_original=False)
web_util.push_to_url(
specfile_path, remote_specfile_path, keep_original=False)
    tty.msg('Buildcache for "%s" written to \n %s' %
(spec, remote_spackfile_path))
try:
# create an index.html for the build_cache directory so specs can be
# found
if regenerate_index:
generate_package_index(url_util.join(
outdir, os.path.relpath(cache_prefix, tmpdir)))
finally:
shutil.rmtree(tmpdir)
return None
def download_tarball(spec):
"""
Download binary tarball for given package into stage area
Return True if successful
"""
if not spack.mirror.MirrorCollection():
tty.die("Please add a spack mirror to allow " +
"download of pre-compiled packages.")
tarball = tarball_path_name(spec, '.spack')
for mirror in spack.mirror.MirrorCollection().values():
url = url_util.join(
mirror.fetch_url, _build_cache_relative_path, tarball)
# stage the tarball into standard place
stage = Stage(url, name="build_cache", keep=True)
try:
stage.fetch()
return stage.save_filename
except fs.FetchError:
continue
return None
def make_package_relative(workdir, spec, allow_root):
"""
Change paths in binaries to relative paths. Change absolute symlinks
to relative symlinks.
"""
prefix = spec.prefix
buildinfo = read_buildinfo_file(workdir)
old_layout_root = buildinfo['buildpath']
orig_path_names = list()
cur_path_names = list()
for filename in buildinfo['relocate_binaries']:
orig_path_names.append(os.path.join(prefix, filename))
cur_path_names.append(os.path.join(workdir, filename))
if (spec.architecture.platform == 'darwin' or
spec.architecture.platform == 'test' and
platform.system().lower() == 'darwin'):
relocate.make_macho_binaries_relative(cur_path_names, orig_path_names,
old_layout_root)
if (spec.architecture.platform == 'linux' or
spec.architecture.platform == 'test' and
platform.system().lower() == 'linux'):
relocate.make_elf_binaries_relative(cur_path_names, orig_path_names,
old_layout_root)
relocate.check_files_relocatable(cur_path_names, allow_root)
orig_path_names = list()
cur_path_names = list()
for linkname in buildinfo.get('relocate_links', []):
orig_path_names.append(os.path.join(prefix, linkname))
cur_path_names.append(os.path.join(workdir, linkname))
relocate.make_link_relative(cur_path_names, orig_path_names)
def check_package_relocatable(workdir, spec, allow_root):
"""
Check if package binaries are relocatable.
Change links to placeholder links.
"""
buildinfo = read_buildinfo_file(workdir)
cur_path_names = list()
for filename in buildinfo['relocate_binaries']:
cur_path_names.append(os.path.join(workdir, filename))
relocate.check_files_relocatable(cur_path_names, allow_root)
def relocate_package(spec, allow_root):
"""
Relocate the given package
"""
workdir = str(spec.prefix)
buildinfo = read_buildinfo_file(workdir)
new_layout_root = str(spack.store.layout.root)
new_prefix = str(spec.prefix)
new_rel_prefix = str(os.path.relpath(new_prefix, new_layout_root))
new_spack_prefix = str(spack.paths.prefix)
old_layout_root = str(buildinfo['buildpath'])
old_spack_prefix = str(buildinfo.get('spackprefix'))
old_rel_prefix = buildinfo.get('relative_prefix')
old_prefix = os.path.join(old_layout_root, old_rel_prefix)
rel = buildinfo.get('relative_rpaths')
prefix_to_hash = buildinfo.get('prefix_to_hash', None)
if (old_rel_prefix != new_rel_prefix and not prefix_to_hash):
msg = "Package tarball was created from an install "
msg += "prefix with a different directory layout and an older "
msg += "buildcache create implementation. It cannot be relocated."
raise NewLayoutException(msg)
# older buildcaches do not have the prefix_to_hash dictionary
# need to set an empty dictionary and add one entry to
# prefix_to_prefix to reproduce the old behavior
if not prefix_to_hash:
prefix_to_hash = dict()
hash_to_prefix = dict()
hash_to_prefix[spec.format('{hash}')] = str(spec.package.prefix)
new_deps = spack.build_environment.get_rpath_deps(spec.package)
for d in new_deps:
hash_to_prefix[d.format('{hash}')] = str(d.prefix)
prefix_to_prefix = dict()
for orig_prefix, hash in prefix_to_hash.items():
prefix_to_prefix[orig_prefix] = hash_to_prefix.get(hash, None)
prefix_to_prefix[old_prefix] = new_prefix
prefix_to_prefix[old_layout_root] = new_layout_root
tty.debug("Relocating package from",
"%s to %s." % (old_layout_root, new_layout_root))
def is_backup_file(file):
return file.endswith('~')
# Text files containing the prefix text
text_names = list()
for filename in buildinfo['relocate_textfiles']:
text_name = os.path.join(workdir, filename)
# Don't add backup files generated by filter_file during install step.
if not is_backup_file(text_name):
text_names.append(text_name)
# If we are installing back to the same location don't replace anything
if old_layout_root != new_layout_root:
paths_to_relocate = [old_spack_prefix, old_layout_root]
paths_to_relocate.extend(prefix_to_hash.keys())
files_to_relocate = list(filter(
lambda pathname: not relocate.file_is_relocatable(
pathname, paths_to_relocate=paths_to_relocate),
map(lambda filename: os.path.join(workdir, filename),
buildinfo['relocate_binaries'])))
# If the buildcache was not created with relativized rpaths
# do the relocation of path in binaries
if (spec.architecture.platform == 'darwin' or
spec.architecture.platform == 'test' and
platform.system().lower() == 'darwin'):
relocate.relocate_macho_binaries(files_to_relocate,
old_layout_root,
new_layout_root,
prefix_to_prefix, rel,
old_prefix,
new_prefix)
if (spec.architecture.platform == 'linux' or
spec.architecture.platform == 'test' and
platform.system().lower() == 'linux'):
relocate.relocate_elf_binaries(files_to_relocate,
old_layout_root,
new_layout_root,
prefix_to_prefix, rel,
old_prefix,
new_prefix)
# Relocate links to the new install prefix
link_names = [linkname
for linkname in buildinfo.get('relocate_links', [])]
relocate.relocate_links(link_names,
old_layout_root,
new_layout_root,
old_prefix,
new_prefix,
prefix_to_prefix)
# For all buildcaches
# relocate the install prefixes in text files including dependencies
relocate.relocate_text(text_names,
old_layout_root, new_layout_root,
old_prefix, new_prefix,
old_spack_prefix,
new_spack_prefix,
prefix_to_prefix)
# relocate the install prefixes in binary files including dependencies
relocate.relocate_text_bin(files_to_relocate,
old_layout_root, new_layout_root,
old_prefix, new_prefix,
old_spack_prefix,
new_spack_prefix,
prefix_to_prefix)
def extract_tarball(spec, filename, allow_root=False, unsigned=False,
force=False):
"""
extract binary tarball for given package into install area
"""
if os.path.exists(spec.prefix):
if force:
shutil.rmtree(spec.prefix)
else:
raise NoOverwriteException(str(spec.prefix))
tmpdir = tempfile.mkdtemp()
stagepath = os.path.dirname(filename)
spackfile_name = tarball_name(spec, '.spack')
spackfile_path = os.path.join(stagepath, spackfile_name)
tarfile_name = tarball_name(spec, '.tar.gz')
tarfile_path = os.path.join(tmpdir, tarfile_name)
specfile_name = tarball_name(spec, '.spec.yaml')
specfile_path = os.path.join(tmpdir, specfile_name)
with closing(tarfile.open(spackfile_path, 'r')) as tar:
tar.extractall(tmpdir)
# some buildcache tarfiles use bzip2 compression
if not os.path.exists(tarfile_path):
tarfile_name = tarball_name(spec, '.tar.bz2')
tarfile_path = os.path.join(tmpdir, tarfile_name)
if not unsigned:
if os.path.exists('%s.asc' % specfile_path):
try:
suppress = config.get('config:suppress_gpg_warnings', False)
Gpg.verify('%s.asc' % specfile_path, specfile_path, suppress)
except Exception as e:
shutil.rmtree(tmpdir)
raise e
else:
shutil.rmtree(tmpdir)
raise NoVerifyException(
"Package spec file failed signature verification.\n"
"Use spack buildcache keys to download "
"and install a key for verification from the mirror.")
# get the sha256 checksum of the tarball
checksum = checksum_tarball(tarfile_path)
# get the sha256 checksum recorded at creation
spec_dict = {}
with open(specfile_path, 'r') as inputfile:
content = inputfile.read()
spec_dict = syaml.load(content)
bchecksum = spec_dict['binary_cache_checksum']
# if the checksums don't match don't install
if bchecksum['hash'] != checksum:
shutil.rmtree(tmpdir)
raise NoChecksumException(
"Package tarball failed checksum verification.\n"
"It cannot be installed.")
new_relative_prefix = str(os.path.relpath(spec.prefix,
spack.store.layout.root))
# if the original relative prefix is in the spec file use it
buildinfo = spec_dict.get('buildinfo', {})
old_relative_prefix = buildinfo.get('relative_prefix', new_relative_prefix)
rel = buildinfo.get('relative_rpaths')
# if the original relative prefix and new relative prefix differ the
# directory layout has changed and the buildcache cannot be installed
# if it was created with relative rpaths
info = 'old relative prefix %s\nnew relative prefix %s\nrelative rpaths %s'
tty.debug(info %
(old_relative_prefix, new_relative_prefix, rel))
# if (old_relative_prefix != new_relative_prefix and (rel)):
# shutil.rmtree(tmpdir)
# msg = "Package tarball was created from an install "
# msg += "prefix with a different directory layout. "
# msg += "It cannot be relocated because it "
# msg += "uses relative rpaths."
# raise NewLayoutException(msg)
# extract the tarball in a temp directory
with closing(tarfile.open(tarfile_path, 'r')) as tar:
tar.extractall(path=tmpdir)
# get the parent directory of the file .spack/binary_distribution
    # this should be the directory unpacked from the tarball whose
# name is unknown because the prefix naming is unknown
bindist_file = glob.glob('%s/*/.spack/binary_distribution' % tmpdir)[0]
workdir = re.sub('/.spack/binary_distribution$', '', bindist_file)
tty.debug('workdir %s' % workdir)
# install_tree copies hardlinks
    # create a temporary tarfile from prefix and extract it to workdir
# tarfile preserves hardlinks
temp_tarfile_name = tarball_name(spec, '.tar')
temp_tarfile_path = os.path.join(tmpdir, temp_tarfile_name)
with closing(tarfile.open(temp_tarfile_path, 'w')) as tar:
tar.add(name='%s' % workdir,
arcname='.')
with closing(tarfile.open(temp_tarfile_path, 'r')) as tar:
tar.extractall(spec.prefix)
os.remove(temp_tarfile_path)
# cleanup
os.remove(tarfile_path)
os.remove(specfile_path)
try:
relocate_package(spec, allow_root)
except Exception as e:
shutil.rmtree(spec.prefix)
raise e
else:
manifest_file = os.path.join(spec.prefix,
spack.store.layout.metadata_dir,
spack.store.layout.manifest_file_name)
if not os.path.exists(manifest_file):
spec_id = spec.format('{name}/{hash:7}')
tty.warn('No manifest file in tarball for spec %s' % spec_id)
finally:
shutil.rmtree(tmpdir)
if os.path.exists(filename):
os.remove(filename)
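# Layout of a buildcache entry as implied by the extraction steps above
# (a sketch inferred from this function, not authoritative documentation;
# file names come from tarball_name() with the suffixes shown):
#
#   <name>.spack              outer tar archive, containing:
#       <name>.tar.gz         the installed prefix (or <name>.tar.bz2)
#       <name>.spec.yaml      the spec, including 'binary_cache_checksum'
#       <name>.spec.yaml.asc  detached gpg signature, when signed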
# Internal cache for downloaded specs
_cached_specs = set()
def try_download_specs(urls=None, force=False):
'''
Try to download the urls and cache them
'''
global _cached_specs
if urls is None:
return {}
for link in urls:
with Stage(link, name="build_cache", keep=True) as stage:
if force and os.path.exists(stage.save_filename):
os.remove(stage.save_filename)
if not os.path.exists(stage.save_filename):
try:
stage.fetch()
except fs.FetchError:
continue
with open(stage.save_filename, 'r') as f:
# read the spec from the build cache file. All specs
# in build caches are concrete (as they are built) so
# we need to mark this spec concrete on read-in.
spec = Spec.from_yaml(f)
spec._mark_concrete()
_cached_specs.add(spec)
return _cached_specs
def get_spec(spec=None, force=False):
"""
Check if spec.yaml exists on mirrors and return it if it does
"""
global _cached_specs
urls = set()
if spec is None:
return {}
specfile_name = tarball_name(spec, '.spec.yaml')
if not spack.mirror.MirrorCollection():
tty.debug("No Spack mirrors are currently configured")
return {}
if _cached_specs and spec in _cached_specs:
return _cached_specs
for mirror in spack.mirror.MirrorCollection().values():
fetch_url_build_cache = url_util.join(
mirror.fetch_url, _build_cache_relative_path)
mirror_dir = url_util.local_file_path(fetch_url_build_cache)
if mirror_dir:
tty.msg("Finding buildcaches in %s" % mirror_dir)
link = url_util.join(fetch_url_build_cache, specfile_name)
urls.add(link)
else:
tty.msg("Finding buildcaches at %s" %
url_util.format(fetch_url_build_cache))
link = url_util.join(fetch_url_build_cache, specfile_name)
urls.add(link)
return try_download_specs(urls=urls, force=force)
def get_specs(force=False, allarch=False):
"""
Get spec.yaml files for build caches available on the mirrors
"""
arch = architecture.Arch(architecture.platform(),
'default_os', 'default_target')
arch_pattern = '([^-]*-[^-]*-[^-]*)'
if not allarch:
arch_pattern = '(%s-%s-[^-]*)' % (arch.platform, arch.os)
regex_pattern = '%s(.*)(spec.yaml$)' % (arch_pattern)
arch_re = re.compile(regex_pattern)
if not spack.mirror.MirrorCollection():
tty.debug("No Spack mirrors are currently configured")
return {}
urls = set()
for mirror in spack.mirror.MirrorCollection().values():
fetch_url_build_cache = url_util.join(
mirror.fetch_url, _build_cache_relative_path)
mirror_dir = url_util.local_file_path(fetch_url_build_cache)
if mirror_dir:
tty.msg("Finding buildcaches in %s" % mirror_dir)
if os.path.exists(mirror_dir):
files = os.listdir(mirror_dir)
for file in files:
m = arch_re.search(file)
if m:
link = url_util.join(fetch_url_build_cache, file)
urls.add(link)
else:
tty.msg("Finding buildcaches at %s" %
url_util.format(fetch_url_build_cache))
p, links = web_util.spider(
url_util.join(fetch_url_build_cache, 'index.html'))
for link in links:
m = arch_re.search(link)
if m:
urls.add(link)
return try_download_specs(urls=urls, force=force)
def get_keys(install=False, trust=False, force=False):
"""
Get pgp public keys available on mirror
with suffix .key or .pub
"""
if not spack.mirror.MirrorCollection():
tty.die("Please add a spack mirror to allow " +
"download of build caches.")
keys = set()
for mirror in spack.mirror.MirrorCollection().values():
fetch_url_build_cache = url_util.join(
mirror.fetch_url, _build_cache_relative_path)
mirror_dir = url_util.local_file_path(fetch_url_build_cache)
if mirror_dir:
tty.msg("Finding public keys in %s" % mirror_dir)
files = os.listdir(str(mirror_dir))
for file in files:
if re.search(r'\.key', file) or re.search(r'\.pub', file):
link = url_util.join(fetch_url_build_cache, file)
keys.add(link)
else:
tty.msg("Finding public keys at %s" %
url_util.format(fetch_url_build_cache))
# For s3 mirror need to request index.html directly
p, links = web_util.spider(
url_util.join(fetch_url_build_cache, 'index.html'), depth=1)
for link in links:
if re.search(r'\.key', link) or re.search(r'\.pub', link):
keys.add(link)
for link in keys:
with Stage(link, name="build_cache", keep=True) as stage:
if os.path.exists(stage.save_filename) and force:
os.remove(stage.save_filename)
if not os.path.exists(stage.save_filename):
try:
stage.fetch()
except fs.FetchError:
continue
tty.msg('Found key %s' % link)
if install:
if trust:
Gpg.trust(stage.save_filename)
tty.msg('Added this key to trusted keys.')
else:
tty.msg('Will not add this key to trusted keys. '
'Use -t to install all downloaded keys')
def needs_rebuild(spec, mirror_url, rebuild_on_errors=False):
if not spec.concrete:
raise ValueError('spec must be concrete to check against mirror')
pkg_name = spec.name
pkg_version = spec.version
pkg_hash = spec.dag_hash()
pkg_full_hash = spec.full_hash()
tty.debug('Checking {0}-{1}, dag_hash = {2}, full_hash = {3}'.format(
pkg_name, pkg_version, pkg_hash, pkg_full_hash))
tty.debug(spec.tree())
# Try to retrieve the .spec.yaml directly, based on the known
# format of the name, in order to determine if the package
# needs to be rebuilt.
cache_prefix = build_cache_prefix(mirror_url)
spec_yaml_file_name = tarball_name(spec, '.spec.yaml')
file_path = os.path.join(cache_prefix, spec_yaml_file_name)
result_of_error = 'Package ({0}) will {1}be rebuilt'.format(
spec.short_spec, '' if rebuild_on_errors else 'not ')
try:
_, _, yaml_file = web_util.read_from_url(file_path)
yaml_contents = codecs.getreader('utf-8')(yaml_file).read()
except (URLError, web_util.SpackWebError) as url_err:
err_msg = [
'Unable to determine whether {0} needs rebuilding,',
' caught exception attempting to read from {1}.',
]
tty.error(''.join(err_msg).format(spec.short_spec, file_path))
tty.debug(url_err)
tty.warn(result_of_error)
return rebuild_on_errors
if not yaml_contents:
tty.error('Reading {0} returned nothing'.format(file_path))
tty.warn(result_of_error)
return rebuild_on_errors
spec_yaml = syaml.load(yaml_contents)
# If either the full_hash didn't exist in the .spec.yaml file, or it
# did, but didn't match the one we computed locally, then we should
# just rebuild. This can be simplified once the dag_hash and the
# full_hash become the same thing.
if ('full_hash' not in spec_yaml or
spec_yaml['full_hash'] != pkg_full_hash):
if 'full_hash' in spec_yaml:
reason = 'hash mismatch, remote = {0}, local = {1}'.format(
spec_yaml['full_hash'], pkg_full_hash)
else:
reason = 'full_hash was missing from remote spec.yaml'
tty.msg('Rebuilding {0}, reason: {1}'.format(
spec.short_spec, reason))
tty.msg(spec.tree())
return True
return False
def check_specs_against_mirrors(mirrors, specs, output_file=None,
rebuild_on_errors=False):
"""Check all the given specs against buildcaches on the given mirrors and
determine if any of the specs need to be rebuilt. Reasons for needing to
rebuild include the binary cache for a spec being missing from a mirror, or
being present but with a full_hash that has changed since the spec was last
built.
Arguments:
mirrors (dict): Mirrors to check against
specs (iterable): Specs to check against mirrors
output_file (string): Path to output file to be written. If provided,
mirrors with missing or out-of-date specs will be formatted as a
JSON object and written to this file.
rebuild_on_errors (boolean): Treat any errors encountered while
checking specs as a signal to rebuild the package.
Returns: 1 if any spec was out-of-date on any mirror, 0 otherwise.
"""
rebuilds = {}
for mirror in spack.mirror.MirrorCollection(mirrors).values():
tty.msg('Checking for built specs at %s' % mirror.fetch_url)
rebuild_list = []
for spec in specs:
if needs_rebuild(spec, mirror.fetch_url, rebuild_on_errors):
rebuild_list.append({
'short_spec': spec.short_spec,
'hash': spec.dag_hash()
})
if rebuild_list:
rebuilds[mirror.fetch_url] = {
'mirrorName': mirror.name,
'mirrorUrl': mirror.fetch_url,
'rebuildSpecs': rebuild_list
}
if output_file:
with open(output_file, 'w') as outf:
outf.write(json.dumps(rebuilds))
return 1 if rebuilds else 0
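# Shape of the JSON written to 'output_file' above (a sketch derived
# from the 'rebuilds' dict built in this function, not a documented
# schema):
#
#   {
#       "<mirror fetch url>": {
#           "mirrorName": "...",
#           "mirrorUrl": "<mirror fetch url>",
#           "rebuildSpecs": [
#               {"short_spec": "...", "hash": "<dag hash>"}
#           ]
#       }
#   }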
def _download_buildcache_entry(mirror_root, descriptions):
for description in descriptions:
description_url = os.path.join(mirror_root, description['url'])
path = description['path']
fail_if_missing = description['required']
mkdirp(path)
stage = Stage(
description_url, name="build_cache", path=path, keep=True)
try:
stage.fetch()
except fs.FetchError as e:
tty.debug(e)
if fail_if_missing:
tty.error('Failed to download required url {0}'.format(
description_url))
return False
return True
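# Each entry of 'descriptions' is a dict with the keys read above; a
# sketch of the expected shape (inferred from this helper, not a public
# spack API):
#
#   {'url': '<path relative to the mirror root>',
#    'path': '<local destination directory>',
#    'required': True}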
def download_buildcache_entry(file_descriptions, mirror_url=None):
if not mirror_url and not spack.mirror.MirrorCollection():
tty.die("Please provide or add a spack mirror to allow " +
"download of buildcache entries.")
if mirror_url:
mirror_root = os.path.join(
mirror_url, _build_cache_relative_path)
return _download_buildcache_entry(mirror_root, file_descriptions)
for mirror in spack.mirror.MirrorCollection().values():
mirror_root = os.path.join(
mirror.fetch_url,
_build_cache_relative_path)
if _download_buildcache_entry(mirror_root, file_descriptions):
return True
else:
continue
return False
|
get_model
|
Returns a pre-defined model by name
Parameters
----------
name : str
Name of the model.
pretrained : bool
Whether to load the pretrained weights for model.
classes : int
Number of classes for the output layer.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
Returns
-------
HybridBlock
The model.
|
# pylint: disable=wildcard-import, unused-wildcard-import
"""Model store which handles pretrained models from both
mxnet.gluon.model_zoo.vision and gluoncv.models
"""
from mxnet import gluon
from .ssd import *
from .faster_rcnn import *
from .fcn import *
from .pspnet import *
from .cifarresnet import *
from .cifarresnext import *
from .cifarwideresnet import *
from .resnetv1b import *
from .resnext import *
from .senet import *
from .se_resnet import *
from .yolo import *
__all__ = ['get_model']
# MASKED: get_model function (lines 21-110)
|
def get_model(name, **kwargs):
"""Returns a pre-defined model by name
Parameters
----------
name : str
Name of the model.
pretrained : bool
Whether to load the pretrained weights for model.
classes : int
Number of classes for the output layer.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
Returns
-------
HybridBlock
The model.
"""
models = {
'ssd_300_vgg16_atrous_voc': ssd_300_vgg16_atrous_voc,
'ssd_300_vgg16_atrous_coco': ssd_300_vgg16_atrous_coco,
'ssd_512_vgg16_atrous_voc': ssd_512_vgg16_atrous_voc,
'ssd_512_vgg16_atrous_coco': ssd_512_vgg16_atrous_coco,
'ssd_512_resnet18_v1_voc': ssd_512_resnet18_v1_voc,
'ssd_512_resnet50_v1_voc': ssd_512_resnet50_v1_voc,
'ssd_512_resnet50_v1_coco': ssd_512_resnet50_v1_coco,
'ssd_512_resnet101_v2_voc': ssd_512_resnet101_v2_voc,
'ssd_512_resnet152_v2_voc': ssd_512_resnet152_v2_voc,
'ssd_512_mobilenet1_0_voc': ssd_512_mobilenet1_0_voc,
'ssd_512_mobilenet1_0_coco': ssd_512_mobilenet1_0_coco,
'faster_rcnn_resnet50_v2a_voc': faster_rcnn_resnet50_v2a_voc,
'faster_rcnn_resnet50_v2a_coco': faster_rcnn_resnet50_v2a_coco,
'cifar_resnet20_v1': cifar_resnet20_v1,
'cifar_resnet56_v1': cifar_resnet56_v1,
'cifar_resnet110_v1': cifar_resnet110_v1,
'cifar_resnet20_v2': cifar_resnet20_v2,
'cifar_resnet56_v2': cifar_resnet56_v2,
'cifar_resnet110_v2': cifar_resnet110_v2,
'cifar_wideresnet16_10': cifar_wideresnet16_10,
'cifar_wideresnet28_10': cifar_wideresnet28_10,
'cifar_wideresnet40_8': cifar_wideresnet40_8,
'cifar_resnext29_32x4d': cifar_resnext29_32x4d,
'cifar_resnext29_16x64d': cifar_resnext29_16x64d,
'fcn_resnet50_voc' : get_fcn_voc_resnet50,
'fcn_resnet101_voc' : get_fcn_voc_resnet101,
'fcn_resnet50_ade' : get_fcn_ade_resnet50,
'psp_resnet50_ade' : get_psp_ade_resnet50,
'resnet18_v1b' : resnet18_v1b,
'resnet34_v1b' : resnet34_v1b,
'resnet50_v1b' : resnet50_v1b,
'resnet101_v1b' : resnet101_v1b,
'resnet152_v1b' : resnet152_v1b,
'resnet50_v2a': resnet50_v2a,
'resnext50_32x4d' : resnext50_32x4d,
'resnext101_32x4d' : resnext101_32x4d,
'resnext101_64x4d' : resnext101_64x4d,
'se_resnext50_32x4d' : se_resnext50_32x4d,
'se_resnext101_32x4d' : se_resnext101_32x4d,
'se_resnext101_64x4d' : se_resnext101_64x4d,
'senet_52' : senet_52,
'senet_103' : senet_103,
'senet_154' : senet_154,
'se_resnet18_v1' : se_resnet18_v1,
'se_resnet34_v1' : se_resnet34_v1,
'se_resnet50_v1' : se_resnet50_v1,
'se_resnet101_v1' : se_resnet101_v1,
'se_resnet152_v1' : se_resnet152_v1,
'se_resnet18_v2' : se_resnet18_v2,
'se_resnet34_v2' : se_resnet34_v2,
'se_resnet50_v2' : se_resnet50_v2,
'se_resnet101_v2' : se_resnet101_v2,
'se_resnet152_v2' : se_resnet152_v2,
'darknet53': darknet53,
'yolo3_416_darknet53_voc': yolo3_416_darknet53_voc,
'yolo3_416_darknet53_coco': yolo3_416_darknet53_coco,
}
try:
net = gluon.model_zoo.vision.get_model(name, **kwargs)
return net
except ValueError as e:
upstream_supported = str(e)
# avoid raising inside the except block, which causes a messy chained error message
name = name.lower()
if name not in models:
raise ValueError('%s\n\t%s' % (upstream_supported, '\n\t'.join(sorted(models.keys()))))
net = models[name](**kwargs)
return net
| 21
| 110
|
# pylint: disable=wildcard-import, unused-wildcard-import
"""Model store which handles pretrained models from both
mxnet.gluon.model_zoo.vision and gluoncv.models
"""
from mxnet import gluon
from .ssd import *
from .faster_rcnn import *
from .fcn import *
from .pspnet import *
from .cifarresnet import *
from .cifarresnext import *
from .cifarwideresnet import *
from .resnetv1b import *
from .resnext import *
from .senet import *
from .se_resnet import *
from .yolo import *
__all__ = ['get_model']
def get_model(name, **kwargs):
"""Returns a pre-defined model by name
Parameters
----------
name : str
Name of the model.
pretrained : bool
Whether to load the pretrained weights for model.
classes : int
Number of classes for the output layer.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
Returns
-------
HybridBlock
The model.
"""
models = {
'ssd_300_vgg16_atrous_voc': ssd_300_vgg16_atrous_voc,
'ssd_300_vgg16_atrous_coco': ssd_300_vgg16_atrous_coco,
'ssd_512_vgg16_atrous_voc': ssd_512_vgg16_atrous_voc,
'ssd_512_vgg16_atrous_coco': ssd_512_vgg16_atrous_coco,
'ssd_512_resnet18_v1_voc': ssd_512_resnet18_v1_voc,
'ssd_512_resnet50_v1_voc': ssd_512_resnet50_v1_voc,
'ssd_512_resnet50_v1_coco': ssd_512_resnet50_v1_coco,
'ssd_512_resnet101_v2_voc': ssd_512_resnet101_v2_voc,
'ssd_512_resnet152_v2_voc': ssd_512_resnet152_v2_voc,
'ssd_512_mobilenet1_0_voc': ssd_512_mobilenet1_0_voc,
'ssd_512_mobilenet1_0_coco': ssd_512_mobilenet1_0_coco,
'faster_rcnn_resnet50_v2a_voc': faster_rcnn_resnet50_v2a_voc,
'faster_rcnn_resnet50_v2a_coco': faster_rcnn_resnet50_v2a_coco,
'cifar_resnet20_v1': cifar_resnet20_v1,
'cifar_resnet56_v1': cifar_resnet56_v1,
'cifar_resnet110_v1': cifar_resnet110_v1,
'cifar_resnet20_v2': cifar_resnet20_v2,
'cifar_resnet56_v2': cifar_resnet56_v2,
'cifar_resnet110_v2': cifar_resnet110_v2,
'cifar_wideresnet16_10': cifar_wideresnet16_10,
'cifar_wideresnet28_10': cifar_wideresnet28_10,
'cifar_wideresnet40_8': cifar_wideresnet40_8,
'cifar_resnext29_32x4d': cifar_resnext29_32x4d,
'cifar_resnext29_16x64d': cifar_resnext29_16x64d,
'fcn_resnet50_voc' : get_fcn_voc_resnet50,
'fcn_resnet101_voc' : get_fcn_voc_resnet101,
'fcn_resnet50_ade' : get_fcn_ade_resnet50,
'psp_resnet50_ade' : get_psp_ade_resnet50,
'resnet18_v1b' : resnet18_v1b,
'resnet34_v1b' : resnet34_v1b,
'resnet50_v1b' : resnet50_v1b,
'resnet101_v1b' : resnet101_v1b,
'resnet152_v1b' : resnet152_v1b,
'resnet50_v2a': resnet50_v2a,
'resnext50_32x4d' : resnext50_32x4d,
'resnext101_32x4d' : resnext101_32x4d,
'resnext101_64x4d' : resnext101_64x4d,
'se_resnext50_32x4d' : se_resnext50_32x4d,
'se_resnext101_32x4d' : se_resnext101_32x4d,
'se_resnext101_64x4d' : se_resnext101_64x4d,
'senet_52' : senet_52,
'senet_103' : senet_103,
'senet_154' : senet_154,
'se_resnet18_v1' : se_resnet18_v1,
'se_resnet34_v1' : se_resnet34_v1,
'se_resnet50_v1' : se_resnet50_v1,
'se_resnet101_v1' : se_resnet101_v1,
'se_resnet152_v1' : se_resnet152_v1,
'se_resnet18_v2' : se_resnet18_v2,
'se_resnet34_v2' : se_resnet34_v2,
'se_resnet50_v2' : se_resnet50_v2,
'se_resnet101_v2' : se_resnet101_v2,
'se_resnet152_v2' : se_resnet152_v2,
'darknet53': darknet53,
'yolo3_416_darknet53_voc': yolo3_416_darknet53_voc,
'yolo3_416_darknet53_coco': yolo3_416_darknet53_coco,
}
try:
net = gluon.model_zoo.vision.get_model(name, **kwargs)
return net
except ValueError as e:
upstream_supported = str(e)
# avoid raising inside the except block, which causes a messy chained error message
name = name.lower()
if name not in models:
raise ValueError('%s\n\t%s' % (upstream_supported, '\n\t'.join(sorted(models.keys()))))
net = models[name](**kwargs)
return net
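# Minimal usage sketch (assumes mxnet and the gluoncv submodules imported
# above are available; 'cifar_resnet20_v1' is one of the keys registered
# in the models dict):
#
#   >>> net = get_model('cifar_resnet20_v1', classes=10)  # a HybridBlock
#   >>> get_model('no_such_model')  # raises ValueError listing all names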
|
contains
|
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :meth:`Categorical.__contains__`
and :meth:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``cat.categories`` and the location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
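A minimal sketch consistent with this contract (hedged: the actual
implementation is masked in the code below, and ``categories.get_loc``
is assumed to return either a scalar position or an iterable of
positions):

    def contains(cat, key, container):
        try:
            loc = cat.categories.get_loc(key)
        except KeyError:
            # key is not in categories, so it cannot be in container
            return False
        if is_scalar(loc):
            return loc in container
        # e.g. an IntervalIndex can return an array of locations
        return any(loc_ in container for loc_ in loc)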
|
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending on whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
# In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# cat[0], for example, which would be ``np.int64(1)``, becomes
# ``np.array(1)`` by the time it gets into this function.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
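# Behavioral sketch of the wrapped comparison ops (doctest-style; assumes
# pandas imported as pd):
#
#   >>> c = pd.Categorical(['a', 'b'])
#   >>> c == 'a'
#   array([ True, False])
#   >>> c < 'b'
#   Traceback (most recent call last):
#       ...
#   TypeError: Unordered Categoricals can only compare equality or not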
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
# MASKED: contains function (lines 164-212)
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
`Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation of the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
# Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computationally intensive) factorization step, which
is usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
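# Doctest-style sketch of from_codes (assumes pandas imported as pd;
# code -1 becomes NaN, as documented above):
#
#   >>> pd.Categorical.from_codes([0, 1, -1, 0], categories=['a', 'b'])
#   [a, b, NaN, a]
#   Categories (2, object): [a, b]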
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non-writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
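# Doctest-style sketch of toggling orderedness (assumes pandas imported
# as pd; a copy is returned unless inplace=True):
#
#   >>> c = pd.Categorical(['a', 'b'])
#   >>> c.as_ordered()
#   [a, b]
#   Categories (2, object): [a < b]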
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(fewer or more items than in the old categories will result in values set
to NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand, this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which do not consider an S1 string equal to a
single-char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories, or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must all be included in the old categories. Values which were
in the removed categories will be set to NaN.
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
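# Doctest-style sketch (assumes pandas imported as pd):
#
#   >>> c = pd.Categorical(['a', 'a'], categories=['a', 'b'])
#   >>> c.remove_unused_categories()
#   [a, a]
#   Categories (1, object): [a]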
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
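# Doctest-style sketch of shift (assumes pandas imported as pd; vacated
# positions get code -1, i.e. NaN):
#
#   >>> pd.Categorical(['a', 'b', 'c']).shift(1)
#   [NaN, a, b]
#   Categories (3, object): [a, b, c]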
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ get's all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >= 0.15.0, < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
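# Doctest-style sketch (assumes pandas imported as pd; every category
# gets an entry, even with a count of 0):
#
#   >>> pd.Categorical(['a', 'a'], categories=['a', 'b']).value_counts()
#   a    2
#   b    0
#   dtype: int64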
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
# We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
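        Examples
        --------
        A minimal sketch; the fill value must already be a category:
        >>> pd.Categorical(['a', np.nan, 'b']).fillna('a')
        [a, a, b]
        Categories (2, object): [a, b]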
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
        allow_fill : bool, default None
            How to handle negative values in `indexer`.
            * False: negative values in `indexer` indicate positional indices
              from the right. This is similar to
              :func:`numpy.take`.
            * True: negative values in `indexer` indicate missing values
              (the default). These values are set to `fill_value`. Any
              other negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
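        Examples
        --------
        Illustrative only (categories are preserved by `take`):
        >>> cat = pd.Categorical(['a', 'b', 'c'])
        >>> cat.take([0, 2], allow_fill=False)
        [a, c]
        Categories (3, object): [a, b, c]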
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
        # in a 2-d case be passed (slice(None), ...)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
        # replace " < ... < " with " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
            If (one or more) value is not in categories, or if an assigned
            `Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
            # in a 2-d case be passed (slice(None), ...)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
            # There is a bug in numpy, which does not accept a Series as an
            # indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
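        Examples
        --------
        A short sketch (only valid for ordered categoricals):
        >>> pd.Categorical(['a', 'b'], ordered=True).min()
        'a'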
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
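        Examples
        --------
        Illustrative only:
        >>> pd.Categorical(['a', 'a', 'b']).mode()
        [a]
        Categories (2, object): [a, b]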
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def unique(self):
"""
        Return the ``Categorical`` whose ``categories`` and ``codes`` are
        unique. Unused categories are NOT returned.
        - unordered category: values and categories are sorted by appearance
          order.
        - ordered category: values are sorted by appearance order, categories
          keep their existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
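        Examples
        --------
        A minimal illustration:
        >>> c = pd.Categorical(['a', 'b'])
        >>> c.equals(pd.Categorical(['a', 'b']))
        True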
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
        Returns True if categoricals are of the same dtype:
        same categories and same ordered attribute.
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
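        Examples
        --------
        Illustrative only:
        >>> pd.Categorical(['a', 'b']).repeat(2)
        [a, a, b, b]
        Categories (2, object): [a, b]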
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
    Be aware that assigning to `categories` is an inplace operation, while all
    methods return new categorical data by default (but can be called with
    `inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
    Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
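    Examples
    --------
    A minimal sketch (hypothetical input):
    >>> codes, categories = _factorize_from_iterable(['b', 'a', 'b'])
    >>> codes
    array([1, 0, 1], dtype=int8)
    >>> categories
    Index(['a', 'b'], dtype='object')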
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
        # but only the resulting categories, the order of which is independent
        # of ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
|
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
    This is a helper method for :meth:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
    cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
| 164
| 212
|
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
    In the future, this will change to mean positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
        # results depending on whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
            # Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
                # In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
        # cat[0], for example, which starts as ``np.int64(1)``, would have
        # become ``np.array(1)`` by the time it gets into this function.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
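# A hedged illustration of the comparisons generated above (hypothetical
# values; unordered categoricals only support __eq__/__ne__):
#
#   cat = Categorical(['a', 'b', 'a'], ordered=True)
#   cat == 'a'   # -> array([ True, False,  True])
#   cat < 'b'    # -> array([ True, False,  True])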
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
    This is a helper method for :meth:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
    cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
    `Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
        Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
    # For comparisons, so that numpy uses our implementation of the compare
    # ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
        # Ways of specifying the dtype (in order of priority)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
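        # Illustrative (hypothetical) calls covered by the cases above:
        #   Categorical(['a', 'b'], dtype=CategoricalDtype(['b', 'a']))  # 1
        #   Categorical(['a', 'b'], dtype='category')                    # 2
        #   Categorical(['a', 'b'], categories=['b', 'a'])               # 3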
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
        Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
            number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
            # Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
        so do not need the (computation intensive) factorization step, which is
        usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
            Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
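        Examples
        --------
        A small sketch (-1 codes become NaN):
        >>> pd.Categorical.from_codes([0, 1, -1], categories=['a', 'b'])
        [a, b, NaN]
        Categories (2, object): [a, b]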
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
        set to NaN). If `rename==True`, the categories will simply be renamed
        (fewer or more items than in the old categories will result in values
        set to NaN or in unused categories respectively).
        This method can be used to perform more than one action of adding,
        removing, and reordering simultaneously and is therefore faster than
        performing the individual steps via the more specialised methods.
        On the other hand this method does not do checks (e.g., whether the
        old categories are included in the new categories on a reorder), which
        can result in surprising changes, for example when using special string
        dtypes on python3, which do not consider an S1 string equal to a
        single-char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
            Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
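        Examples
        --------
        Illustrative only:
        >>> c = pd.Categorical(['a', 'b', 'a'])
        >>> c.set_categories(['b', 'a', 'c'])
        [a, b, a]
        Categories (3, object): [b, a, c]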
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
                # remove all _codes which are larger and set to -1/NaN
                cat._codes[cat._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
            items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
        `new_categories` needs to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
            Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
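        Examples
        --------
        Illustrative only (values are unchanged; only the category order moves):
        >>> c = pd.Categorical(['a', 'b'])
        >>> c.reorder_categories(['b', 'a'])
        [a, b]
        Categories (2, object): [b, a]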
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
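    # A minimal sketch of `add_categories`: the new category is appended at
    # the end and is initially unused (repr assumes a 0.23-era pandas):
    #
    # >>> c = pd.Categorical(['a', 'b'])
    # >>> c.add_categories(['c'])
    # [a, b]
    # Categories (3, object): [a, b, c]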
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
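    # Sketch of `remove_categories`: values in a removed category become NaN
    # (repr assumes a 0.23-era pandas):
    #
    # >>> c = pd.Categorical(['a', 'b', 'c', 'a'])
    # >>> c.remove_categories(['a'])
    # [NaN, b, c, NaN]
    # Categories (2, object): [b, c]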
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
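    # Sketch of `remove_unused_categories` (repr assumes a 0.23-era pandas):
    #
    # >>> c = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
    # >>> c.remove_unused_categories()
    # [a, b]
    # Categories (2, object): [a, b]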
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
        If a `dict` or :class:`~pandas.Series` is used, any unmapped category is
        mapped to `NaN`. Note that if this happens, an :class:`~pandas.Index`
        will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
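    # Sketch of `shift`: the codes are rolled and the vacated positions get
    # the -1 NA sentinel, i.e. NaN values (repr assumes a 0.23-era pandas):
    #
    # >>> c = pd.Categorical(['a', 'b', 'c'])
    # >>> c.shift(1)
    # [NaN, a, b]
    # Categories (3, object): [a, b, c]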
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
            # we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
            # >= 0.15.0, < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
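    # Sketch of `searchsorted` on an ordered Categorical; the search runs on
    # the underlying codes, so values must be existing categories (the exact
    # output dtype may vary by platform):
    #
    # >>> c = pd.Categorical(['a', 'b', 'c'], ordered=True)
    # >>> c.searchsorted('b')
    # array([1])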
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
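    # Sketch of `value_counts`: every category gets an entry, even with a
    # count of zero (repr assumes a 0.23-era pandas):
    #
    # >>> c = pd.Categorical(['a', 'a', 'b'], categories=['a', 'b', 'c'])
    # >>> c.value_counts()
    # a    2
    # b    1
    # c    0
    # dtype: int64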
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
            A numpy array of the same dtype as categorical.categories.dtype, or
            an Index if the categories are datetime-like / period
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
        # We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
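    # Sketch of `fillna` with a scalar: the fill value must already be one
    # of the categories, otherwise a ValueError is raised (repr assumes a
    # 0.23-era pandas):
    #
    # >>> c = pd.Categorical(['a', None, 'b'])
    # >>> c.fillna('a')
    # [a, a, b]
    # Categories (2, object): [a, b]
    # >>> c.fillna('z')  # 'z' is not a category, raises ValueError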
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
              (the default). These values are set to `fill_value`. Any
              other negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
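    # Sketch of `take`: with allow_fill=True a -1 in the indexer marks a
    # missing value; with allow_fill=False it indexes from the right (repr
    # assumes a 0.23-era pandas):
    #
    # >>> c = pd.Categorical(['a', 'b', 'c'])
    # >>> c.take([0, 2, -1], allow_fill=True)
    # [a, c, NaN]
    # Categories (3, object): [a, b, c]
    # >>> c.take([0, 2, -1], allow_fill=False)
    # [a, c, c]
    # Categories (3, object): [a, b, c]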
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
        # in a 2-d case be passed (slice(None), ...)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
        # replace " < ... < " with " ... " to save horizontal space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
            If (one or more) value is not in categories or if an assigned
            `Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
            # in a 2-d case be passed (slice(None), ...)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
            # There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
        Examples
        --------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
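    # Sketch of `mode`: the result keeps the full set of categories (repr
    # assumes a 0.24-era pandas, where the `dropna` argument was added):
    #
    # >>> pd.Categorical(['a', 'b', 'b', 'c']).mode()
    # [b]
    # Categories (3, object): [a, b, c]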
def unique(self):
"""
        Return the ``Categorical`` whose ``categories`` and ``codes`` are
        unique. Unused categories are NOT returned.
        - unordered category: values and categories are sorted by appearance
          order.
        - ordered category: values are sorted by appearance order; categories
          keep their existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
        Returns True if categoricals are the same dtype:
        same categories and same ordered attribute
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
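    # Sketch of `describe` output (column widths are approximate; repr
    # assumes a 0.23-era pandas):
    #
    # >>> pd.Categorical(['a', 'a', 'b']).describe()
    #             counts     freqs
    # categories
    # a                2  0.666667
    # b                1  0.333333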
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
    Be aware that assigning to `categories` is an inplace operation, while all
    methods return new categorical data by default (but can be called with
    `inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
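# Sketch of what `_get_codes_for_values` computes: the position of each
# value within `categories`, with -1 for values that are not categories
# (the exact integer dtype depends on the number of categories):
#
# >>> vals = np.array(['b', 'a', 'd'], dtype=object)
# >>> cats = pd.Index(['a', 'b', 'c'])
# >>> _get_codes_for_values(vals, cats)
# array([ 1,  0, -1], dtype=int8)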
def _recode_for_categories(codes, old_categories, new_categories):
"""
    Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
|
_recode_for_categories
|
Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
|
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
        # results depending on whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
            # Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
                # In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
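# Sketch of the comparison semantics produced by `_cat_compare_op`
# (behavior as of this 0.23-era source):
#
# >>> c = pd.Categorical(['a', 'b'])
# >>> c == 'a'
# array([ True, False])
# >>> c == 'z'  # scalar not in categories: all-False for __eq__
# array([False, False])
# >>> c < 'b'   # ordering comparison on an unordered Categorical raises
# ...           # TypeError: Unordered Categoricals can only compare
# ...           # equality or not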
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
    This is a helper method for :meth:`__contains__`
    and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
    cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
    `Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
        Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
    # For comparisons, so that numpy uses our implementation of the compare
    # ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
        Assigning to `categories` is an inplace operation!
Raises
------
ValueError
            If the new categories do not validate as categories or if the
            number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
            # Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computationally intensive) factorization step, which
is usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
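Examples
--------
A minimal sketch of reconstructing a Categorical from existing codes
(-1 marks a missing value):
>>> pd.Categorical.from_codes([0, 1, 0, -1], categories=['a', 'b'])
[a, b, a, NaN]
Categories (2, object): [a, b]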
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non-writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
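Examples
--------
A short illustration:
>>> pd.Categorical(['a', 'b']).as_ordered()
[a, b]
Categories (2, object): [a < b]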
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(fewer or more items than in the old categories will result in values
set to NaN or in unused categories, respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which does not consider an S1 string equal to a
single-char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
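Examples
--------
A minimal sketch; values whose category is dropped become NaN:
>>> c = pd.Categorical(['a', 'b', 'c'])
>>> c.set_categories(['a', 'b'])
[a, b, NaN]
Categories (2, object): [a, b]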
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list-like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
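Examples
--------
A short sketch; the values are unchanged, only the category order moves:
>>> pd.Categorical(['a', 'b', 'a']).reorder_categories(['b', 'a'],
... ordered=True)
[a, b, a]
Categories (2, object): [b < a]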
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
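Examples
--------
A minimal sketch; the added category starts out unused:
>>> pd.Categorical(['a', 'b']).add_categories(['c'])
[a, b]
Categories (3, object): [a, b, c]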
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
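Examples
--------
A short sketch; values in removed categories become NaN:
>>> pd.Categorical(['a', 'b', 'a']).remove_categories(['b'])
[a, NaN, a]
Categories (1, object): [a]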
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
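Examples
--------
A minimal sketch:
>>> c = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
>>> c.remove_unused_categories()
[a, b, a]
Categories (2, object): [a, b]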
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
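Examples
--------
A short sketch; vacated positions become NaN:
>>> pd.Categorical(['a', 'b', 'c']).shift(1)
[NaN, a, b]
Categories (3, object): [a, b, c]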
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >=0.15.0 < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
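Examples
--------
A minimal sketch; note that unused categories are counted as 0:
>>> pd.Categorical(['a', 'a'], categories=['a', 'b']).value_counts()
a    2
b    0
dtype: int64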
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime or period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
# We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
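Examples
--------
A short sketch; the fill value must itself be one of the categories:
>>> pd.Categorical(['a', None, 'b']).fillna('a')
[a, a, b]
Categories (2, object): [a, b]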
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indexer` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indexer` indicate missing values
(the default). These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
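Examples
--------
A minimal sketch with ``allow_fill=True``, where -1 marks missing values:
>>> pd.Categorical(['a', 'b', 'c']).take_nd([0, -1, 1], allow_fill=True)
[a, NaN, b]
Categories (3, object): [a, b, c]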
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# replace " < ... < " with " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
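Examples
--------
A short sketch (requires an ordered Categorical):
>>> pd.Categorical(['b', 'a'], ordered=True).min()
'a'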
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
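Examples
--------
A minimal sketch; the original categories are kept on the result:
>>> pd.Categorical(['a', 'b', 'b']).mode()
[b]
Categories (2, object): [a, b]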
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keep existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
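Examples
--------
A short sketch:
>>> pd.Categorical(['a', 'b']).equals(pd.Categorical(['a', 'b']))
True
>>> pd.Categorical(['a', 'b']).equals(pd.Categorical(['a', 'c']))
False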
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype:
same categories and same ordered flag
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
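Examples
--------
A rough sketch of the output shape (whitespace may differ):
>>> pd.Categorical(['a', 'a', 'b']).describe()
            counts     freqs
categories
a                2  0.666667
b                1  0.333333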
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is an inplace operation, while all
methods return new categorical data by default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
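Example (illustrative sketch; values absent from `categories` map to -1):
>>> _get_codes_for_values(np.array(['b', 'a', 'x']), pd.Index(['a', 'b']))
array([ 1,  0, -1], dtype=int8)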
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
# MASKED: _recode_for_categories function (lines 2457-2486)
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
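Examples
--------
A minimal sketch for a plain (non-categorical) input:
>>> codes, categories = _factorize_from_iterable(['b', 'a', 'b'])
>>> codes
array([1, 0, 1], dtype=int8)
>>> categories
Index(['a', 'b'], dtype='object')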
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# of ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
|
def _recode_for_categories(codes, old_categories, new_categories):
"""
Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
| 2,457
| 2,486
|
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
# In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :method:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
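Examples
--------
A short sketch; 'b' is a category but never appears in the codes:
>>> cat = pd.Categorical(['a'], categories=['a', 'b'])
>>> contains(cat, 'a', container=cat._codes)
True
>>> contains(cat, 'b', container=cat._codes)
False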
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
`Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation of the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, dtype.categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
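# Illustrative sketch (added commentary, not from the original source):
# a CategoricalDtype passed via ``dtype`` wins over inference, so the
# resulting categories and orderedness come from the dtype.
#
#   dtype = CategoricalDtype(['b', 'a'], ordered=True)
#   Categorical(['a', 'b'], dtype=dtype)
#   # [a, b]
#   # Categories (2, object): [b < a]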
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
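# Illustrative sketch (added commentary, not from the original source):
# astype to a non-categorical dtype materializes a numpy array, while a
# categorical target returns a (possibly copied) Categorical.
#
#   cat = Categorical(['a', 'b'])
#   cat.astype(object)        # -> ndarray(['a', 'b'], dtype=object)
#   cat.astype('category')    # -> Categorical, copied unless copy=False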
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
# Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computationally intensive) factorization step, which
is usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
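# Illustrative sketch (added commentary, not from the original source):
# -1 codes become NaN; no factorization is performed.
#
#   Categorical.from_codes([0, 1, -1, 0], categories=['a', 'b'])
#   # [a, b, NaN, a]
#   # Categories (2, object): [a, b]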
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non-writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(fewer or more items than in the old categories will result in values set
to NaN or in unused categories, respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which do not consider an S1 string equal to a
single-char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
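# Illustrative sketch (added commentary, not from the original source):
# dropped categories turn their values into NaN, newly listed ones start
# out unused.
#
#   c = Categorical(['a', 'b', 'a'])
#   c.set_categories(['b', 'c'])
#   # [NaN, b, NaN]
#   # Categories (2, object): [b, c]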
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories, or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
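# Illustrative sketch (added commentary, not from the original source):
#
#   c = Categorical(['a', 'b'])
#   c.add_categories(['c'])
#   # [a, b]
#   # Categories (3, object): [a, b, c]   ('c' is present but unused)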
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
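# Illustrative sketch (added commentary, not from the original source):
#
#   c = Categorical(['a', 'b', 'a'])
#   c.remove_categories(['a'])
#   # [NaN, b, NaN]
#   # Categories (1, object): [b]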
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
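# Illustrative sketch (added commentary, not from the original source):
#
#   c = Categorical(['a', 'b'], categories=['a', 'b', 'c'])
#   c.remove_unused_categories()
#   # [a, b]
#   # Categories (2, object): [a, b]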
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
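# Illustrative sketch (added commentary, not from the original source):
# vacated positions are filled with the -1 code, i.e. NaN.
#
#   c = Categorical(['a', 'b', 'c'])
#   c.shift(1)
#   # [NaN, a, b]
#   # Categories (3, object): [a, b, c]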
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >= 0.15.0, < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
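# Illustrative sketch (added commentary, not from the original source):
# values are translated to codes first, so only ordered categoricals
# with in-category search values are accepted.
#
#   c = Categorical(['a', 'b', 'c'], ordered=True)
#   c.searchsorted('b')                    # insertion position for 'b'
#   Categorical(['a']).searchsorted('a')   # raises ValueError: not ordered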
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
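# Illustrative sketch (added commentary, not from the original source):
# unused categories get a zero count; NaN only shows up with dropna=False.
#
#   c = Categorical(['a', 'a', np.nan], categories=['a', 'b'])
#   c.value_counts()              # a -> 2, b -> 0
#   c.value_counts(dropna=False)  # a -> 2, b -> 0, NaN -> 1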
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
# We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
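# Illustrative sketch (added commentary, not from the original source):
# the fill value must already be a category.
#
#   c = Categorical(['a', np.nan, 'b'], categories=['a', 'b'])
#   c.fillna('a')   # [a, a, b]
#   c.fillna('z')   # raises ValueError: fill value must be in categories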
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indexer` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indexer` indicate missing values
(the default). These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None),....)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# replace " < ... < " with a plain " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None),....)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
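# Illustrative sketch (added commentary, not from the original source):
# the result keeps the full set of categories.
#
#   Categorical(['a', 'a', 'b']).mode()
#   # [a]
#   # Categories (2, object): [a, b]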
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keeps existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
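# Illustrative sketch (added commentary, not from the original source):
# for unordered categoricals, category order does not matter because the
# codes are recoded before comparing.
#
#   c1 = Categorical(['a', 'b'], categories=['a', 'b'])
#   c2 = Categorical(['a', 'b'], categories=['b', 'a'])
#   c1.equals(c2)   # True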
def is_dtype_equal(self, other):
"""
Returns True if categoricals are of the same dtype:
same categories and same ordered flag
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is an inplace operation, while
all methods return new categorical data by default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
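# Illustrative sketch (not part of the original source): given categories
# ['a', 'b'], _get_codes_for_values maps each value to its category position,
# with -1 for values missing from the categories. Expected behavior,
# assuming `import numpy as np` and `import pandas as pd`:
#
# >>> _get_codes_for_values(np.array(['b', 'a', 'x'], dtype=object),
# ...                       pd.Index(['a', 'b']))
# array([ 1,  0, -1], dtype=int8)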
def _recode_for_categories(codes, old_categories, new_categories):
"""
Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
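# Illustrative sketch (not part of the original source): factorizing a plain
# iterable yields integer codes plus the (sorted, when sortable) unique
# values as an Index. Expected behavior:
#
# >>> codes, categories = _factorize_from_iterable(['b', 'a', 'b'])
# >>> codes
# array([1, 0, 1], dtype=int8)
# >>> categories
# Index(['a', 'b'], dtype='object')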
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
# In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)``, by the time it gets
# into this function it would have become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
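# Illustrative sketch (not part of the original source): the comparison
# wrapper above only permits equality-style operators on unordered
# Categoricals, while ordered ones compare by category order. Assuming
# `import pandas as pd`, expected behavior:
#
# >>> c = pd.Categorical(['a', 'b'])
# >>> c < c                      # unordered: raises
# TypeError: Unordered Categoricals can only compare equality or not
# >>> o = pd.Categorical(['a', 'b'], categories=['b', 'a'], ordered=True)
# >>> o < 'a'                    # 'b' < 'a' under the custom order
# array([False,  True])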
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :meth:`Categorical.__contains__`
and :meth:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
`Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation of the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized order)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
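# Illustrative sketch (not part of the original source): the dtype
# resolution order documented in __init__ means an explicit
# CategoricalDtype takes precedence over anything inferred from `values`.
# Assuming `import pandas as pd`, expected behavior:
#
# >>> dtype = pd.api.types.CategoricalDtype(['b', 'a'], ordered=True)
# >>> pd.Categorical(['a', 'b', 'a'], dtype=dtype)
# [a, b, a]
# Categories (2, object): [b < a]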
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
# Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computationally intensive) factorization step, which is
usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
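# Illustrative sketch (not part of the original source): from_codes skips
# factorization entirely, and -1 codes become NaN. Assuming
# `import pandas as pd`, expected behavior:
#
# >>> pd.Categorical.from_codes([0, 1, -1, 0], categories=['a', 'b'])
# [a, b, NaN, a]
# Categories (2, object): [a, b]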
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non-writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(fewer or more items than in the old categories will result in values set
to NaN or in unused categories, respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand, this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which do not consider an S1 string equal to a
single-char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
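# Illustrative sketch (not part of the original source): with rename=False
# (the default) values are recoded against the new categories, so values
# whose category was dropped become NaN. Expected behavior:
#
# >>> c = pd.Categorical(['a', 'b', 'c'])
# >>> c.set_categories(['a', 'b'])
# [a, b, NaN]
# Categories (2, object): [a, b]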
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
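# Illustrative sketch (not part of the original source): reorder_categories
# requires exactly the same set of categories, only in a new order.
# Expected behavior:
#
# >>> c = pd.Categorical(['a', 'b', 'a'])
# >>> c.reorder_categories(['b', 'a'], ordered=True)
# [a, b, a]
# Categories (2, object): [b < a]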
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
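# Illustrative sketch (not part of the original source): categories that no
# value points to are dropped and the codes are compacted accordingly.
# Expected behavior:
#
# >>> c = pd.Categorical(['a', 'a'], categories=['a', 'b', 'c'])
# >>> c.remove_unused_categories()
# [a, a]
# Categories (1, object): [a]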
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
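# Illustrative sketch (not part of the original source): shifting rolls the
# codes and fills the vacated positions with -1, i.e. NaN. Expected
# behavior:
#
# >>> c = pd.Categorical(['a', 'b', 'c'])
# >>> c.shift(1)
# [NaN, a, b]
# Categories (3, object): [a, b, c]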
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >=15.0 < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
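# Illustrative sketch (not part of the original source): searchsorted works
# on the codes of an *ordered* Categorical, so insertion points follow the
# category order rather than lexical order; scalar values are wrapped in a
# Series first, so an array comes back. Expected behavior:
#
# >>> c = pd.Categorical(['a', 'b', 'c'], ordered=True)
# >>> c.searchsorted('b')
# array([1])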
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
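# Illustrative sketch (not part of the original source): every category
# gets an entry, even when its count is zero. Expected behavior:
#
# >>> c = pd.Categorical(['a', 'a'], categories=['a', 'b'])
# >>> c.value_counts()
# a    2
# b    0
# dtype: int64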
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
# We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
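# Illustrative sketch (not part of the original source): the fill value
# must already be one of the categories; anything else raises ValueError.
# Expected behavior:
#
# >>> c = pd.Categorical(['a', None, 'b'], categories=['a', 'b'])
# >>> c.fillna('a')
# [a, a, b]
# Categories (2, object): [a, b]
# >>> c.fillna('z')          # not in categories: raises
# ValueError: fill value must be in categories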
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any
other negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# replace " < ... < " with " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None),....)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Examples
--------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
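
Examples
--------
A minimal sketch, assuming a small ordered categorical:

>>> pd.Categorical([1, 2, 3], ordered=True).min()
1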
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
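
Examples
--------
A minimal sketch, assuming a small ordered categorical:

>>> pd.Categorical([1, 2, 3], ordered=True).max()
3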
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns a `Categorical`, even if only one value is returned.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
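
Examples
--------
A minimal sketch; note that the result keeps the full set of categories:

>>> pd.Categorical([1, 2, 2, 3]).mode()
[2]
Categories (3, int64): [1, 2, 3]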
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order; categories
keep the existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
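
Examples
--------
A minimal sketch; unordered categoricals compare equal element-wise even
when their categories are listed in a different order:

>>> c1 = pd.Categorical(['a', 'b'], categories=['a', 'b'])
>>> c2 = pd.Categorical(['a', 'b'], categories=['b', 'a'])
>>> c1.equals(c2)
True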
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype
same categories, and same ordered
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
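
Examples
--------
A minimal sketch:

>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.describe().loc['a', 'counts']
2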
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
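
Examples
--------
A minimal sketch:

>>> pd.Categorical(['a', 'b']).repeat(2)
[a, a, b, b]
Categories (2, object): [a, b]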
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is an inplace operation, while all
methods return new categorical data by default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
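
A minimal illustrative sketch of the expected mapping:

>>> _get_codes_for_values(np.array(['a', 'b', 'a']), pd.Index(['a', 'b']))
array([0, 1, 0], dtype=int8)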
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
Convert codes from one set of categories to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
# MASKED: _factorize_from_iterable function (lines 2504-2541)
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
|
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
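
Examples
--------
A minimal sketch; the codes index into the returned categories:

>>> codes, categories = _factorize_from_iterable(list('baabc'))
>>> list(codes)
[1, 0, 0, 1, 2]
>>> list(categories)
['a', 'b', 'c']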
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# of ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
| 2,504
| 2,541
|
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending on whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
# In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :meth:`__contains__`
and :meth:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
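
Examples
--------
A minimal sketch; 'b' is a category but has no entry in the codes:

>>> cat = pd.Categorical(['a'], categories=['a', 'b'])
>>> contains(cat, 'a', container=cat._codes)
True
>>> contains(cat, 'b', container=cat._codes)
False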
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
`Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation of the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
# Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computationally intensive) factorization step, which is
usually done on the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
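
Examples
--------
A minimal sketch; ``-1`` encodes a missing value:

>>> c = pd.Categorical.from_codes([0, 1, -1], categories=['a', 'b'])
>>> list(c)
['a', 'b', nan]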
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
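
Examples
--------
A minimal sketch:

>>> pd.Categorical(['a', 'b']).as_ordered().ordered
True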
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(fewer or more items than in the old categories will result in values set
to NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which does not consider an S1 string equal to a
single-char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
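
Examples
--------
A minimal sketch; the values are recoded to the new categories:

>>> c = pd.Categorical(['a', 'b'])
>>> c.set_categories(['b', 'a', 'c'])
[a, b]
Categories (3, object): [b, a, c]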
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` needs to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
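
Examples
--------
A minimal sketch:

>>> c = pd.Categorical(['a', 'b'])
>>> c.reorder_categories(['b', 'a'])
[a, b]
Categories (2, object): [b, a]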
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
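
Examples
--------
A minimal sketch; the new category is appended but initially unused:

>>> c = pd.Categorical(['a'])
>>> c.add_categories(['b'])
[a]
Categories (2, object): [a, b]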
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
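
Examples
--------
A minimal sketch; values in removed categories become NaN:

>>> c = pd.Categorical(['a', 'b'])
>>> c.remove_categories(['b'])
[a, NaN]
Categories (1, object): [a]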
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
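
Examples
--------
A minimal sketch:

>>> c = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
>>> c.remove_unused_categories()
[a, b]
Categories (2, object): [a, b]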
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
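
Examples
--------
A minimal sketch; vacated positions become NaN:

>>> c = pd.Categorical(['a', 'b', 'c'])
>>> list(c.shift(1))
[nan, 'a', 'b']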
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >= 0.15.0, < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
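
Examples
--------
A minimal sketch:

>>> pd.Categorical(['a', np.nan]).isna()
array([False,  True])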
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Missing values (-1 in .codes) are detected as null; this returns
their boolean inverse.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
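
Examples
--------
A minimal sketch; unused categories get a count of zero:

>>> c = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
>>> c.value_counts().tolist()
[2, 1, 0]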
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
# We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
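# Illustrative usage sketch (editor's example, not part of the original
# source): filling missing values with an existing category.
#   >>> cat = pd.Categorical(['a', 'b', np.nan], categories=['a', 'b'])
#   >>> cat.fillna('a')
#   [a, b, a]
#   Categories (2, object): [a, b]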
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indexer` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indexer` indicate missing values
(the default). These values are set to `fill_value`. Any
other negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
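# Illustrative sketch: with allow_fill=True, -1 in the indexer marks a
# missing value rather than a position from the right.
#   >>> cat = pd.Categorical(['a', 'b', 'c'])
#   >>> cat.take([0, -1, 2], allow_fill=True)
#   [a, NaN, c]
#   Categories (3, object): [a, b, c]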
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ....)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# collapse ' < ... < ' to ' ... ' to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ....)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as
# an indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
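# Illustrative sketch: assignment is restricted to existing categories,
# while np.nan is always accepted.
#   >>> c = pd.Categorical(['a', 'b'])
#   >>> c[0] = 'b'
#   >>> c
#   [b, b]
#   Categories (2, object): [a, b]
#   >>> c[0] = 'z'  # would raise ValueError: not a known category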
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
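# Illustrative sketch: min/max follow the category order, not the lexical
# order of the values.
#   >>> cat = pd.Categorical(['a', 'c'], categories=['c', 'b', 'a'],
#   ...                      ordered=True)
#   >>> cat.min(), cat.max()
#   ('c', 'a')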
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns a `Categorical`, even if it contains only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
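# Illustrative sketch:
#   >>> pd.Categorical(['a', 'a', 'b']).mode()
#   [a]
#   Categories (2, object): [a, b]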
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order; categories
keep their existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
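# Illustrative sketch: equality is element-wise on values; a differing
# category order is recoded away first.
#   >>> a = pd.Categorical(['a', 'b'], categories=['a', 'b'])
#   >>> b = pd.Categorical(['a', 'b'], categories=['b', 'a'])
#   >>> a.equals(b)
#   True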
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype:
same categories and same ordered flag.
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
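# Illustrative sketch (output layout approximate):
#   >>> pd.Categorical(['a', 'b', 'a']).describe()
#               counts     freqs
#   categories
#   a                2  0.666667
#   b                1  0.333333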
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is an inplace operation, while all
methods return new categorical data by default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
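# Illustrative sketch (internal API): plain list-likes are factorized with
# sorted categories.
#   >>> codes, categories = _factorize_from_iterable(['b', 'a', 'b'])
#   >>> codes
#   array([1, 0, 1], dtype=int8)
#   >>> categories
#   Index(['a', 'b'], dtype='object')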
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
|
astype
|
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
|
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
# In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)``, by the time it
# gets into this function it would have become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :meth:`__contains__`
and :meth:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
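# Illustrative sketch: membership is checked against the values, so a
# declared-but-unused category is not "in" the categorical.
#   >>> cat = pd.Categorical(['a'], categories=['a', 'b'])
#   >>> 'a' in cat, 'b' in cat
#   (True, False)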
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
`Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation of the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (in priority order)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is a inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
# MASKED: astype function (lines 506-528)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
# Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computation intensive) factorization step, which is
usually done on the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
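# Illustrative sketch: -1 is the missing-value code.
#   >>> pd.Categorical.from_codes([0, 1, -1], categories=['a', 'b'])
#   [a, b, NaN]
#   Categories (2, object): [a, b]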
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(fewer or more items than in the old categories will result in values set
to NaN or in unused categories, respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand, this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which do not consider an S1 string equal to a
single char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
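# Illustrative sketch: values outside the new categories become NaN and
# unseen new categories stay unused.
#   >>> c = pd.Categorical(['a', 'b', 'c'])
#   >>> c.set_categories(['a', 'b', 'd'])
#   [a, b, NaN]
#   Categories (3, object): [a, b, d]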
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
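# Illustrative sketch: the new category is appended but unused.
#   >>> c = pd.Categorical(['a', 'b'])
#   >>> c.add_categories(['c'])
#   [a, b]
#   Categories (3, object): [a, b, c]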
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
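# Illustrative sketch: values in removed categories turn into NaN.
#   >>> c = pd.Categorical(['a', 'b', 'a'])
#   >>> c.remove_categories(['b'])
#   [a, NaN, a]
#   Categories (1, object): [a]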
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
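# Illustrative sketch:
#   >>> c = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
#   >>> c.remove_unused_categories()
#   [a, b]
#   Categories (2, object): [a, b]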
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
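# Illustrative sketch of the shift semantics: vacated positions are filled
# with the NA sentinel (-1 in codes), i.e. NaN values:
#
# >>> pd.Categorical(['a', 'b', 'c']).shift(1)
# [NaN, a, b]
# Categories (3, object): [a, b, c]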
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >= 0.15.0 but < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
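# Illustrative sketch; note that in this code path a scalar value is
# wrapped in a Series internally, so the result comes back array-like:
#
# >>> c = pd.Categorical(['a', 'b', 'c'], ordered=True)
# >>> c.searchsorted('b')
# array([1])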
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Missing values (-1 in .codes) are detected as null; this returns their
boolean inverse.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
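# Illustrative sketch (assumes numpy/pandas imports):
#
# >>> import numpy as np
# >>> pd.Categorical(['a', np.nan, 'b']).dropna()
# [a, b]
# Categories (2, object): [a, b]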
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
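# Illustrative sketch: every category gets an entry, including those with
# a count of zero:
#
# >>> pd.Categorical(['a', 'a', 'b'], categories=['a', 'b', 'c']).value_counts()
# a    2
# b    1
# c    0
# dtype: int64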
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
# We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
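# Illustrative sketch: the fill value must already be one of the
# categories, otherwise a ValueError is raised:
#
# >>> pd.Categorical(['a', np.nan, 'b']).fillna('a')
# [a, a, b]
# Categories (2, object): [a, b]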
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indexer` indicate positional indices
from the right. This is similar to :func:`numpy.take`.
* True: negative values in `indexer` indicate missing values
(the default). These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
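# Illustrative sketch: membership is by value, not by category, so an
# unused category is not "in" the categorical:
#
# >>> 'a' in pd.Categorical(['a'], categories=['a', 'b'])
# True
# >>> 'b' in pd.Categorical(['a'], categories=['a', 'b'])
# False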
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# replace " < ... < " with " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
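# Illustrative sketch: ties are returned together, sorted by code:
#
# >>> pd.Categorical(['a', 'a', 'b', 'b', 'c']).mode()
# [a, b]
# Categories (3, object): [a, b, c]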
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keep their existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
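# Illustrative sketch: for unordered categoricals, a differing category
# order is recoded away, so equal values still compare equal:
#
# >>> c1 = pd.Categorical(['a', 'b'])
# >>> c2 = pd.Categorical(['a', 'b'], categories=['b', 'a'])
# >>> c1.equals(c2)
# True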
def is_dtype_equal(self, other):
"""
Returns True if categoricals are of the same dtype:
same categories and same ordered flag
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is an inplace operation, while all
methods return new categorical data by default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
Convert a set of codes to codes for a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
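# Illustrative sketch of _factorize_from_iterable on a plain list;
# categories come back sorted because factorize(sort=True) is tried first:
#
# >>> codes, cats = _factorize_from_iterable(['b', 'a', 'b'])
# >>> codes
# array([1, 0, 1], dtype=int8)
# >>> cats
# Index(['a', 'b'], dtype='object')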
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending on whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
# In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
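# Illustrative sketch of the generated comparison ops: a scalar that is a
# known category is compared via its code position:
#
# >>> c = pd.Categorical(['a', 'b', 'a'], ordered=True)
# >>> c < 'b'
# array([ True, False,  True])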
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :meth:`__contains__`
and :class:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
_codes_doc = """The category codes of this categorical.
The codes are an array of integers which are the positions of the actual
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
`Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation of the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
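# Illustrative sketch: casting to a non-categorical dtype materializes the
# values as a numpy array:
#
# >>> pd.Categorical(['a', 'b']).astype(object)
# array(['a', 'b'], dtype=object)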
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
# Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computationally intensive) factorization step, which is
usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
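# Illustrative sketch: -1 codes become NaN values:
#
# >>> pd.Categorical.from_codes([0, 1, -1, 0], categories=['a', 'b'])
# [a, b, NaN, a]
# Categories (2, object): [a, b]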
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non-writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
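# Illustrative sketch (assumes pandas is imported as pd): toggling the
# ordered flag only changes the dtype, never the values:
# >>> c = pd.Categorical(['a', 'b'])
# >>> c.as_ordered()
# [a, b]
# Categories (2, object): [a < b]
# >>> c.as_ordered().as_unordered()
# [a, b]
# Categories (2, object): [a, b]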
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(fewer or more items than in the old categories will result in values
set to NaN or in unused categories, respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand, this method does not do any checks (e.g., whether
the old categories are included in the new categories on a reorder),
which can result in surprising changes, for example when using special
string dtypes on python3, which do not consider an S1 string equal to a
single-char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
cat._codes[cat._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
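# Illustrative sketch (assumes pandas is imported as pd): values that
# fall outside the new categories are set to NaN:
# >>> c = pd.Categorical(['a', 'b', 'c'])
# >>> c.set_categories(['a', 'b'])
# [a, b, NaN]
# Categories (2, object): [a, b]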
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` needs to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
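# Illustrative sketch (assumes pandas is imported as pd): the same
# items, reordered, optionally becoming ordered:
# >>> c = pd.Categorical(['a', 'b', 'a'])
# >>> c.reorder_categories(['b', 'a'], ordered=True)
# [a, b, a]
# Categories (2, object): [b < a]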
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
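# Illustrative sketch (assumes pandas is imported as pd): the added
# category is appended and is initially unused:
# >>> c = pd.Categorical(['a', 'b'])
# >>> c.add_categories(['c'])
# [a, b]
# Categories (3, object): [a, b, c]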
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
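# Illustrative sketch (assumes pandas is imported as pd): values in the
# removed categories become NaN:
# >>> c = pd.Categorical(['a', 'b', 'a'])
# >>> c.remove_categories(['a'])
# [NaN, b, NaN]
# Categories (1, object): [b]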
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
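# Illustrative sketch (assumes pandas is imported as pd): only the
# categories that actually occur in the values are kept:
# >>> c = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
# >>> c.remove_unused_categories()
# [a, b]
# Categories (2, object): [a, b]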
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
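# Illustrative sketch (assumes pandas is imported as pd): vacated slots
# are filled with the -1 sentinel, so they display as NaN:
# >>> pd.Categorical(['a', 'b', 'c']).shift(1)
# [NaN, a, b]
# Categories (3, object): [a, b, c]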
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >= 0.15.0, < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
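# Illustrative sketch (assumes pandas is imported as pd); only ordered
# categoricals support searchsorted, and the value must itself be a
# category. The exact scalar-vs-array return shape has varied across
# pandas versions:
# >>> c = pd.Categorical(['a', 'b', 'c'], ordered=True)
# >>> c.searchsorted('b')
# array([1])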
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
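# Illustrative sketch (assumes pandas is imported as pd): every
# category gets an entry, even with a count of 0:
# >>> c = pd.Categorical(['a', 'a', 'b'], categories=['a', 'b', 'c'])
# >>> c.value_counts()
# a    2
# b    1
# c    0
# dtype: int64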
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
# We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
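# Illustrative sketch (assumes pandas is imported as pd and numpy as
# np); the fill value must already be one of the categories:
# >>> c = pd.Categorical(['a', np.nan, 'b'])
# >>> c.fillna('a')
# [a, a, b]
# Categories (2, object): [a, b]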
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any
other negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# collapse the long ' < ... < ' run to ' ... ' to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
If (one or more) values are not in categories, or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
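# Illustrative sketch (assumes pandas is imported as pd): the result is
# a Categorical with the same dtype, even for a single mode:
# >>> pd.Categorical(['a', 'a', 'b']).mode()
# [a]
# Categories (2, object): [a, b]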
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keep their existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype:
same categories, and same ordered flag
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
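# Illustrative sketch (assumes pandas is imported as pd); exact float
# formatting may differ by version:
# >>> pd.Categorical(['a', 'a', 'b']).describe()
#             counts     freqs
# categories
# a                2  0.666667
# b                1  0.333333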
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is an inplace operation, while all
methods return new categorical data by default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
|
_set_categories
|
Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
|
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
# In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :meth:`__contains__`
and :meth:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
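# Illustrative sketch of the distinction documented above (assumes
# pandas is imported as pd): 'b' is a category but not among the values:
# >>> 'b' in pd.Categorical(['a'], categories=['a', 'b'])
# False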
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
`Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
Whether or not this categorical is treated as a ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation of the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (in order of priority)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
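Examples
--------
A minimal usage sketch (output illustrative); assignment renames the
categories in place:
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.categories = ['x', 'y']
>>> c
[x, y, x]
Categories (2, object): [x, y]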
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
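Examples
--------
A minimal usage sketch (output illustrative):
>>> pd.Categorical(['a', 'b']).astype(object)
array(['a', 'b'], dtype=object)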
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
# Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computationally intensive) factorization step, which
is usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
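Examples
--------
A minimal usage sketch (output illustrative); -1 marks a missing value:
>>> pd.Categorical.from_codes([0, 1, -1, 0], categories=['a', 'b'])
[a, b, NaN, a]
Categories (2, object): [a, b]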
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non-writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
# MASKED: _set_categories function (lines 687-718)
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(fewer or more items than in the old categories will result in values
set to NaN or in unused categories, respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand, this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which does not consider an S1 string equal to a
single-char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
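Examples
--------
A minimal usage sketch (output illustrative); values not covered by the
new categories become NaN:
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.set_categories(['b', 'c'])
[NaN, b, NaN]
Categories (2, object): [b, c]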
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories, or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
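Examples
--------
A minimal usage sketch (output illustrative):
>>> c = pd.Categorical(['a', 'b'])
>>> c.reorder_categories(['b', 'a'], ordered=True)
[a, b]
Categories (2, object): [b < a]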
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
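Examples
--------
A minimal usage sketch (output illustrative); the added category is
unused until values are assigned to it:
>>> c = pd.Categorical(['a', 'b'])
>>> c.add_categories(['c'])
[a, b]
Categories (3, object): [a, b, c]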
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
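Examples
--------
A minimal usage sketch (output illustrative); values in removed
categories become NaN:
>>> c = pd.Categorical(['a', 'b', 'a'])
>>> c.remove_categories(['a'])
[NaN, b, NaN]
Categories (1, object): [b]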
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
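Examples
--------
A minimal usage sketch (output illustrative):
>>> c = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
>>> c.remove_unused_categories()
[a, b]
Categories (2, object): [a, b]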
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used, any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
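Examples
--------
A minimal usage sketch (output illustrative); vacated positions are
filled with NaN:
>>> pd.Categorical(['a', 'b', 'c']).shift(1)
[NaN, a, b]
Categories (3, object): [a, b, c]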
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >=15.0 < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
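Examples
--------
A minimal usage sketch (output illustrative); unused categories get a
count of 0:
>>> pd.Categorical(['a', 'a'], categories=['a', 'b']).value_counts()
a    2
b    0
dtype: int64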
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
# We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
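Examples
--------
A minimal usage sketch (output illustrative); the fill value must
already be one of the categories:
>>> c = pd.Categorical(['a', np.nan, 'b'])
>>> c.fillna('a')
[a, a, b]
Categories (2, object): [a, b]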
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
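Examples
--------
A minimal usage sketch (output illustrative); with ``allow_fill=True``
a -1 in the indexer marks a missing value:
>>> c = pd.Categorical(['a', 'b', 'c'])
>>> c.take([0, -1, 2], allow_fill=True)
[a, NaN, c]
Categories (3, object): [a, b, c]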
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ....)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# replace " < ... < " with " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
If (one or more) value is not in the categories, or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ....)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Examples
--------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
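Examples
--------
A minimal usage sketch (output illustrative):
>>> pd.Categorical(['a', 'b'], ordered=True).min()
'a'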
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
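Examples
--------
A minimal usage sketch (output illustrative):
>>> pd.Categorical(['a', 'a', 'b']).mode()
[a]
Categories (2, object): [a, b]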
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order; categories
keep their existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
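Examples
--------
A minimal usage sketch (output illustrative); for unordered
categoricals the category order does not matter:
>>> c1 = pd.Categorical(['a', 'b'], categories=['a', 'b'])
>>> c2 = pd.Categorical(['a', 'b'], categories=['b', 'a'])
>>> c1.equals(c2)
True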
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
Returns True if the categoricals have the same dtype:
same categories and same ordered flag.
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
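Examples
--------
A minimal usage sketch (output illustrative, formatting approximate):
>>> pd.Categorical(['a', 'a', 'b']).describe()
            counts     freqs
categories
a                2  0.666667
b                1  0.333333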
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is an inplace operation, while all
methods return new categorical data per default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
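Examples
--------
A minimal usage sketch (output illustrative); inferred categories are
sorted when possible:
>>> codes, categories = _factorize_from_iterable(['b', 'a', 'b'])
>>> codes
array([1, 0, 1], dtype=int8)
>>> categories
Index(['a', 'b'], dtype='object')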
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# of ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
|
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
| 687
| 718
|
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
# In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# For example, ``cat[0]``, which is ``np.int64(1)``, would have become
# ``np.array(1)`` by the time it gets into this function.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
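# Illustrative sketch (not part of the original source) of the comparison
# semantics implemented above:
#
#   >>> c = pd.Categorical(['a', 'b'])
#   >>> c == 'a'              # scalar in categories: compared via codes
#   array([ True, False])
#   >>> c < 'a'               # unordered: inequalities raise
#   TypeError: Unordered Categoricals can only compare equality or not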
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :meth:`__contains__`
and :meth:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
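# Illustrative sketch (not part of the original source), mirroring the
# comment above: 'b' is a category but its code never occurs in the values:
#
#   >>> c = pd.Categorical(['a'], categories=['a', 'b'])
#   >>> contains(c, 'b', container=c._codes)
#   False
#   >>> contains(c, 'a', container=c._codes)
#   True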
_codes_doc = """The category codes of this categorical.
Level codes are an array if integer which are the positions of the real
values in the categories array.
There is not setter, use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
`Categoricals` can only take on a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation of the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
# Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computation-intensive) factorization step, which is
usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
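# Illustrative sketch (not part of the original source): -1 codes become NaN:
#
#   >>> pd.Categorical.from_codes([0, 1, -1], categories=['a', 'b'])
#   [a, b, NaN]
#   Categories (2, object): [a, b]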
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non-writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(fewer or more items than in the old categories will result in values set
to NaN or in unused categories, respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand, this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which does not consider an S1 string equal to a
single-char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
cat._codes[cat._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
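# Illustrative sketch (not part of the original source):
#
#   >>> c = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
#   >>> c.remove_unused_categories()
#   [a, b]
#   Categories (2, object): [a, b]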
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
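# Illustrative sketch (not part of the original source): vacated positions
# are filled with the -1 code, i.e. NaN:
#
#   >>> pd.Categorical(['a', 'b', 'c']).shift(1)
#   [NaN, a, b]
#   Categories (3, object): [a, b, c]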
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >=15.0 < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
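# Illustrative sketch (not part of the original source): values are
# translated to codes first, so the search runs on the integer codes
# (return shape/formatting approximate):
#
#   >>> c = pd.Categorical(['a', 'b', 'c'], ordered=True)
#   >>> c.searchsorted('b')
#   array([1])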
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
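# Illustrative sketch (not part of the original source): every category gets
# an entry, including unused ones:
#
#   >>> pd.Categorical(['a', 'a', 'b'], categories=['a', 'b', 'c']).value_counts()
#   a    2
#   b    1
#   c    0
#   dtype: int64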
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
# We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
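# Illustrative sketch (not part of the original source): the fill value must
# already be a category:
#
#   >>> c = pd.Categorical(['a', np.nan, 'b'], categories=['a', 'b'])
#   >>> c.fillna('a')
#   [a, a, b]
#   Categories (2, object): [a, b]
#   >>> c.fillna('z')
#   ValueError: fill value must be in categories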
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None),....)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# collapse the middle of an ordered category list to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None),....)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Examples
--------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
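    # Usage sketch (not part of the original source): the modal values come
    # back as a sorted Categorical built straight from the winning codes.
    #
    #   >>> pd.Categorical(['a', 'b', 'b', 'c']).mode()
    #   [b]
    #   Categories (3, object): [a, b, c]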
def unique(self):
"""
        Return the ``Categorical`` whose ``categories`` and ``codes`` are
        unique. Unused categories are NOT returned.
        - unordered category: values and categories are sorted by appearance
          order.
        - ordered category: values are sorted by appearance order, categories
          keep existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
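    # Illustration (a sketch, not in the original source): equality is
    # order-sensitive in the values but tolerates differently ordered
    # categories, thanks to the recoding fallback above.
    #
    #   >>> c1 = pd.Categorical(['a', 'b'], categories=['a', 'b'])
    #   >>> c2 = pd.Categorical(['a', 'b'], categories=['b', 'a'])
    #   >>> c1.equals(c2)
    #   True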
def is_dtype_equal(self, other):
"""
        Returns True if categoricals are of the same dtype:
        same categories and same ordered flag.
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
            A dataframe with frequencies and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
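    # Example output (a sketch, not in the original source; formatting is
    # approximate):
    #
    #   >>> pd.Categorical(['a', 'a', 'b']).describe()
    #               counts     freqs
    #   categories
    #   a                2  0.666667
    #   b                1  0.333333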
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
        See Also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
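    # Usage sketch (not part of the original source): repeat operates on the
    # codes, so the categories themselves are untouched.
    #
    #   >>> pd.Categorical(['a', 'b']).repeat(2)
    #   [a, a, b, b]
    #   Categories (2, object): [a, b]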
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
    def _concat_same_type(cls, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
        Return a boolean NumPy array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
    Be aware that assigning to `categories` is an inplace operation, while
    all methods return new categorical data by default (but can be called
    with `inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
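# Worked example (a sketch, not part of the original source): values absent
# from the categories hash to -1, the sentinel for missing.
#
#   >>> vals = np.array(['a', 'b', 'd'], dtype=object)
#   >>> cats = pd.Index(['a', 'b', 'c'])
#   >>> _get_codes_for_values(vals, cats)
#   array([ 0,  1, -1], dtype=int8)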
def _recode_for_categories(codes, old_categories, new_categories):
"""
    Convert a set of codes from one set of categories to another
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
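# Quick sketch of the dispatch above (not part of the original source):
#
#   >>> _convert_to_list_like(np.array([1, 2]))   # has a dtype -> unchanged
#   array([1, 2])
#   >>> _convert_to_list_like(iter('ab'))         # iterator -> list
#   ['a', 'b']
#   >>> _convert_to_list_like(1)                  # scalar -> 1-element list
#   [1]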
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
        # The value of ordered is irrelevant since we don't use cat as such,
        # but only the resulting categories, the order of which is
        # independent of ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
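# Illustration (a sketch, not in the original source): plain list-likes come
# back with an ordinary Index of categories, while categorical input keeps
# its dtype via a CategoricalIndex.
#
#   >>> codes, cats = _factorize_from_iterable(['b', 'a', 'b'])
#   >>> codes
#   array([1, 0, 1], dtype=int8)
#   >>> cats
#   Index(['a', 'b'], dtype='object')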
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
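# Usage sketch (not part of the original source), mirroring what MultiIndex
# construction does internally:
#
#   >>> codes_list, cats_list = _factorize_from_iterables([['a', 'b'], [1, 1]])
#   >>> codes_list
#   [array([0, 1], dtype=int8), array([0, 0], dtype=int8)]
#   >>> cats_list
#   [Index(['a', 'b'], dtype='object'), Int64Index([1], dtype='int64')]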